diff --git a/clang/include/clang/Basic/riscv_vector.td b/clang/include/clang/Basic/riscv_vector.td
--- a/clang/include/clang/Basic/riscv_vector.td
+++ b/clang/include/clang/Basic/riscv_vector.td
@@ -643,10 +643,20 @@
       llvm::Value *LoadValue = Builder.CreateCall(F, Ops, "");
       llvm::Value *V = Builder.CreateExtractValue(LoadValue, {0});
       // Store new_vl.
+      llvm::Value *Cmp =
+          Builder.CreateCmp(llvm::CmpInst::ICMP_NE, NewVL,
+                            ConstantPointerNull::get(
+                                cast<llvm::PointerType>(NewVL->getType())));
+      BasicBlock *NewVLStore = createBasicBlock("newvl_store", this->CurFn);
+      BasicBlock *NewVLEnd = createBasicBlock("newvl_end", this->CurFn);
+      Builder.CreateCondBr(Cmp, NewVLStore, NewVLEnd);
+      Builder.SetInsertPoint(NewVLStore);
       clang::CharUnits Align =
           CGM.getNaturalPointeeTypeAlignment(E->getArg(1)->getType());
       llvm::Value *Val = Builder.CreateExtractValue(LoadValue, {1});
       Builder.CreateStore(Val, Address(NewVL, Val->getType(), Align));
+      Builder.CreateBr(NewVLEnd);
+      Builder.SetInsertPoint(NewVLEnd);
       return V;
     }
   }],
@@ -663,10 +673,20 @@
       llvm::Value *LoadValue = Builder.CreateCall(F, Ops, "");
       llvm::Value *V = Builder.CreateExtractValue(LoadValue, {0});
       // Store new_vl.
+      llvm::Value *Cmp =
+          Builder.CreateCmp(llvm::CmpInst::ICMP_NE, NewVL,
+                            ConstantPointerNull::get(
+                                cast<llvm::PointerType>(NewVL->getType())));
+      BasicBlock *NewVLStore = createBasicBlock("newvl_store", this->CurFn);
+      BasicBlock *NewVLEnd = createBasicBlock("newvl_end", this->CurFn);
+      Builder.CreateCondBr(Cmp, NewVLStore, NewVLEnd);
+      Builder.SetInsertPoint(NewVLStore);
       clang::CharUnits Align =
           CGM.getNaturalPointeeTypeAlignment(E->getArg(3)->getType());
       llvm::Value *Val = Builder.CreateExtractValue(LoadValue, {1});
       Builder.CreateStore(Val, Address(NewVL, Val->getType(), Align));
+      Builder.CreateBr(NewVLEnd);
+      Builder.SetInsertPoint(NewVLEnd);
       return V;
     }
   }] in {
@@ -966,8 +986,19 @@
         Builder.CreateStore(Val, Address(Ops[I], Val->getType(), Align));
       }
       // Store new_vl.
+      llvm::Value *Cmp = Builder.CreateCmp(
+          llvm::CmpInst::ICMP_NE, NewVL,
+          ConstantPointerNull::get(cast<llvm::PointerType>(NewVL->getType())));
+      BasicBlock *NewVLStore = createBasicBlock("newvl_store", this->CurFn);
+      BasicBlock *NewVLEnd = createBasicBlock("newvl_end", this->CurFn);
+      Builder.CreateCondBr(Cmp, NewVLStore, NewVLEnd);
+      Builder.SetInsertPoint(NewVLStore);
       llvm::Value *Val = Builder.CreateExtractValue(LoadValue, {NF});
-      return Builder.CreateStore(Val, Address(NewVL, Val->getType(), Align));
+      llvm::Value *StoreNewVL =
+          Builder.CreateStore(Val, Address(NewVL, Val->getType(), Align));
+      Builder.CreateBr(NewVLEnd);
+      Builder.SetInsertPoint(NewVLEnd);
+      return StoreNewVL;
     }
   }],
   MaskedManualCodegen = [{
@@ -994,8 +1025,19 @@
         Builder.CreateStore(Val, Address(Ops[I], Val->getType(), Align));
       }
       // Store new_vl.
+      llvm::Value *Cmp = Builder.CreateCmp(
+          llvm::CmpInst::ICMP_NE, NewVL,
+          ConstantPointerNull::get(cast<llvm::PointerType>(NewVL->getType())));
+      BasicBlock *NewVLStore = createBasicBlock("newvl_store", this->CurFn);
+      BasicBlock *NewVLEnd = createBasicBlock("newvl_end", this->CurFn);
+      Builder.CreateCondBr(Cmp, NewVLStore, NewVLEnd);
+      Builder.SetInsertPoint(NewVLStore);
       llvm::Value *Val = Builder.CreateExtractValue(LoadValue, {NF});
-      return Builder.CreateStore(Val, Address(NewVL, Val->getType(), Align));
+      llvm::Value *StoreNewVL =
+          Builder.CreateStore(Val, Address(NewVL, Val->getType(), Align));
+      Builder.CreateBr(NewVLEnd);
+      Builder.SetInsertPoint(NewVLEnd);
+      return StoreNewVL;
     }
   }] in {
     defvar PV = PVString.S;
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vleff-optimized.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vleff-optimized.c
new file mode 100644
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vleff-optimized.c
@@ -0,0 +1,306 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -no-opaque-pointers -triple riscv64 -target-feature +f -target-feature +d \
+// RUN:   -target-feature +v -target-feature +zfh -target-feature +experimental-zvfh \
+// RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64-O0 %s
+// RUN: %clang_cc1 -no-opaque-pointers -triple riscv64 -target-feature +f -target-feature +d \
+// RUN:   -target-feature +v -target-feature +zfh -target-feature +experimental-zvfh \
+// RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -O2 | FileCheck --check-prefix=CHECK-RV64-O2 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-O0-LABEL: @func_return_nullptr(
+// CHECK-RV64-O0-NEXT:  entry:
+// CHECK-RV64-O0-NEXT:    ret i64* null
+//
+// CHECK-RV64-O2-LABEL: @func_return_nullptr(
+// CHECK-RV64-O2-NEXT:  entry:
+// CHECK-RV64-O2-NEXT:    ret i64* null
+//
+size_t *__attribute__((noinline)) func_return_nullptr() {
+  return NULL;
+}
+
+// CHECK-RV64-O0-LABEL: @test_vleff_save_new_vl_to_not_null(
+// CHECK-RV64-O0-NEXT:  entry:
+// CHECK-RV64-O0-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV64-O0-NEXT:    [[TMP1:%.*]] = call { <vscale x 1 x i8>, i64 } @llvm.riscv.vleff.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8>* [[TMP0]], i64 [[VL:%.*]])
+// CHECK-RV64-O0-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, i64 } [[TMP1]], 0
+// CHECK-RV64-O0-NEXT:    [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null
+// CHECK-RV64-O0-NEXT:    br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]]
+// CHECK-RV64-O0:       newvl_store:
+// CHECK-RV64-O0-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, i64 } [[TMP1]], 1
+// CHECK-RV64-O0-NEXT:    store i64 [[TMP4]], i64* [[NEW_VL]], align 8
+// CHECK-RV64-O0-NEXT:    br label [[NEWVL_END]]
+// CHECK-RV64-O0:       newvl_end:
+// CHECK-RV64-O0-NEXT:    ret <vscale x 1 x i8> [[TMP2]]
+//
+// CHECK-RV64-O2-LABEL: @test_vleff_save_new_vl_to_not_null(
+// CHECK-RV64-O2-NEXT:  entry:
+// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV64-O2-NEXT:    [[TMP1:%.*]] = tail call { <vscale x 1 x i8>, i64 } @llvm.riscv.vleff.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8>* [[TMP0]], i64 [[VL:%.*]])
+// CHECK-RV64-O2-NEXT:    [[DOTNOT:%.*]] = icmp eq i64* [[NEW_VL:%.*]], null
+// CHECK-RV64-O2-NEXT:    br i1 [[DOTNOT]], label [[NEWVL_END:%.*]], label [[NEWVL_STORE:%.*]]
+// CHECK-RV64-O2:       newvl_store:
+// CHECK-RV64-O2-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, i64 } [[TMP1]], 1
+// CHECK-RV64-O2-NEXT:    store i64 [[TMP2]], i64* [[NEW_VL]], align 8
+// CHECK-RV64-O2-NEXT:    br label [[NEWVL_END]]
+// CHECK-RV64-O2:       newvl_end:
+// CHECK-RV64-O2-NEXT:    [[TMP3:%.*]] =
extractvalue { , i64 } [[TMP1]], 0 +// CHECK-RV64-O2-NEXT: ret [[TMP3]] +// +vint8mf8_t test_vleff_save_new_vl_to_not_null(const int8_t *base, size_t *new_vl, size_t vl) { + return vle8ff_v_i8mf8(base, new_vl, vl); +} + +// CHECK-RV64-O0-LABEL: @test_vleff_save_new_vl_to_not_null_m( +// CHECK-RV64-O0-NEXT: entry: +// CHECK-RV64-O0-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-O0-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-O0-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 +// CHECK-RV64-O0-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-O0-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64-O0: newvl_store: +// CHECK-RV64-O0-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-O0-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-O0-NEXT: br label [[NEWVL_END]] +// CHECK-RV64-O0: newvl_end: +// CHECK-RV64-O0-NEXT: ret [[TMP2]] +// +// CHECK-RV64-O2-LABEL: @test_vleff_save_new_vl_to_not_null_m( +// CHECK-RV64-O2-NEXT: entry: +// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-O2-NEXT: [[TMP1:%.*]] = tail call { , i64 } @llvm.riscv.vleff.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-O2-NEXT: [[DOTNOT:%.*]] = icmp eq i64* [[NEW_VL:%.*]], null +// CHECK-RV64-O2-NEXT: br i1 [[DOTNOT]], label [[NEWVL_END:%.*]], label [[NEWVL_STORE:%.*]] +// CHECK-RV64-O2: newvl_store: +// CHECK-RV64-O2-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-O2-NEXT: store i64 [[TMP2]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-O2-NEXT: br label [[NEWVL_END]] +// CHECK-RV64-O2: newvl_end: +// CHECK-RV64-O2-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 0 +// CHECK-RV64-O2-NEXT: ret [[TMP3]] +// +vint8mf8_t test_vleff_save_new_vl_to_not_null_m(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, size_t *new_vl, size_t vl) { + return vle8ff_v_i8mf8_m(mask, maskedoff, base, new_vl, vl); +} + +// CHECK-RV64-O0-LABEL: @test_vleff_save_new_vl_to_direct_nullptr( +// CHECK-RV64-O0-NEXT: entry: +// CHECK-RV64-O0-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-O0-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv1i8.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-O0-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 +// CHECK-RV64-O0-NEXT: br i1 false, label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64-O0: newvl_store: +// CHECK-RV64-O0-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-O0-NEXT: store i64 [[TMP3]], i64* null, align 8 +// CHECK-RV64-O0-NEXT: br label [[NEWVL_END]] +// CHECK-RV64-O0: newvl_end: +// CHECK-RV64-O0-NEXT: ret [[TMP2]] +// +// CHECK-RV64-O2-LABEL: @test_vleff_save_new_vl_to_direct_nullptr( +// CHECK-RV64-O2-NEXT: entry: +// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-O2-NEXT: [[TMP1:%.*]] = tail call { , i64 } @llvm.riscv.vleff.nxv1i8.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-O2-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 +// CHECK-RV64-O2-NEXT: ret [[TMP2]] +// +vint8mf8_t test_vleff_save_new_vl_to_direct_nullptr(const int8_t *base, size_t vl) { + return vle8ff_v_i8mf8(base, NULL, vl); +} + +// CHECK-RV64-O0-LABEL: @test_vleff_save_new_vl_to_direct_nullptr_m( +// CHECK-RV64-O0-NEXT: entry: +// CHECK-RV64-O0-NEXT: [[TMP0:%.*]] = 
bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-O0-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-O0-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 +// CHECK-RV64-O0-NEXT: br i1 false, label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64-O0: newvl_store: +// CHECK-RV64-O0-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-O0-NEXT: store i64 [[TMP3]], i64* null, align 8 +// CHECK-RV64-O0-NEXT: br label [[NEWVL_END]] +// CHECK-RV64-O0: newvl_end: +// CHECK-RV64-O0-NEXT: ret [[TMP2]] +// +// CHECK-RV64-O2-LABEL: @test_vleff_save_new_vl_to_direct_nullptr_m( +// CHECK-RV64-O2-NEXT: entry: +// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-O2-NEXT: [[TMP1:%.*]] = tail call { , i64 } @llvm.riscv.vleff.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-O2-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 +// CHECK-RV64-O2-NEXT: ret [[TMP2]] +// +vint8mf8_t test_vleff_save_new_vl_to_direct_nullptr_m(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, size_t vl) { + return vle8ff_v_i8mf8_m(mask, maskedoff, base, NULL, vl); +} + +// CHECK-RV64-O0-LABEL: @test_vleff_save_new_vl_to_indirect_nullptr( +// CHECK-RV64-O0-NEXT: entry: +// CHECK-RV64-O0-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-O0-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv1i8.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-O0-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 +// CHECK-RV64-O0-NEXT: [[TMP3:%.*]] = icmp ne i64* null, null +// CHECK-RV64-O0-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64-O0: newvl_store: +// CHECK-RV64-O0-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-O0-NEXT: store i64 [[TMP4]], i64* null, align 8 +// CHECK-RV64-O0-NEXT: br label [[NEWVL_END]] +// CHECK-RV64-O0: newvl_end: +// CHECK-RV64-O0-NEXT: ret [[TMP2]] +// +// CHECK-RV64-O2-LABEL: @test_vleff_save_new_vl_to_indirect_nullptr( +// CHECK-RV64-O2-NEXT: entry: +// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-O2-NEXT: [[TMP1:%.*]] = tail call { , i64 } @llvm.riscv.vleff.nxv1i8.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-O2-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 +// CHECK-RV64-O2-NEXT: ret [[TMP2]] +// +vint8mf8_t test_vleff_save_new_vl_to_indirect_nullptr(const int8_t *base, size_t vl) { + size_t *new_vl = NULL; + return vle8ff_v_i8mf8(base, new_vl, vl); +} + +// CHECK-RV64-O0-LABEL: @test_vleff_save_new_vl_to_indirect_nullptr_m( +// CHECK-RV64-O0-NEXT: entry: +// CHECK-RV64-O0-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-O0-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-O0-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 +// CHECK-RV64-O0-NEXT: [[TMP3:%.*]] = icmp ne i64* null, null +// CHECK-RV64-O0-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64-O0: newvl_store: +// CHECK-RV64-O0-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-O0-NEXT: store i64 [[TMP4]], i64* null, align 8 +// CHECK-RV64-O0-NEXT: br label [[NEWVL_END]] +// CHECK-RV64-O0: newvl_end: +// CHECK-RV64-O0-NEXT: ret [[TMP2]] +// +// CHECK-RV64-O2-LABEL: 
@test_vleff_save_new_vl_to_indirect_nullptr_m( +// CHECK-RV64-O2-NEXT: entry: +// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-O2-NEXT: [[TMP1:%.*]] = tail call { , i64 } @llvm.riscv.vleff.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-O2-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 +// CHECK-RV64-O2-NEXT: ret [[TMP2]] +// +vint8mf8_t test_vleff_save_new_vl_to_indirect_nullptr_m(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, size_t vl) { + size_t *new_vl = NULL; + return vle8ff_v_i8mf8_m(mask, maskedoff, base, new_vl, vl); +} + +// CHECK-RV64-O0-LABEL: @test_vleff_save_new_vl_to_cross_func_nullptr( +// CHECK-RV64-O0-NEXT: entry: +// CHECK-RV64-O0-NEXT: [[CALL:%.*]] = call i64* @func_return_nullptr() +// CHECK-RV64-O0-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-O0-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv1i8.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-O0-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 +// CHECK-RV64-O0-NEXT: [[TMP3:%.*]] = icmp ne i64* [[CALL]], null +// CHECK-RV64-O0-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64-O0: newvl_store: +// CHECK-RV64-O0-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-O0-NEXT: store i64 [[TMP4]], i64* [[CALL]], align 8 +// CHECK-RV64-O0-NEXT: br label [[NEWVL_END]] +// CHECK-RV64-O0: newvl_end: +// CHECK-RV64-O0-NEXT: ret [[TMP2]] +// +// CHECK-RV64-O2-LABEL: @test_vleff_save_new_vl_to_cross_func_nullptr( +// CHECK-RV64-O2-NEXT: entry: +// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-O2-NEXT: [[TMP1:%.*]] = tail call { , i64 } @llvm.riscv.vleff.nxv1i8.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-O2-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 +// CHECK-RV64-O2-NEXT: ret [[TMP2]] +// +vint8mf8_t test_vleff_save_new_vl_to_cross_func_nullptr(const int8_t *base, size_t vl) { + size_t *new_vl = func_return_nullptr(); + return vle8ff_v_i8mf8(base, new_vl, vl); +} + +// CHECK-RV64-O0-LABEL: @test_vleff_save_new_vl_to_cross_func_nullptr_m( +// CHECK-RV64-O0-NEXT: entry: +// CHECK-RV64-O0-NEXT: [[CALL:%.*]] = call i64* @func_return_nullptr() +// CHECK-RV64-O0-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-O0-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-O0-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 +// CHECK-RV64-O0-NEXT: [[TMP3:%.*]] = icmp ne i64* [[CALL]], null +// CHECK-RV64-O0-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64-O0: newvl_store: +// CHECK-RV64-O0-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-O0-NEXT: store i64 [[TMP4]], i64* [[CALL]], align 8 +// CHECK-RV64-O0-NEXT: br label [[NEWVL_END]] +// CHECK-RV64-O0: newvl_end: +// CHECK-RV64-O0-NEXT: ret [[TMP2]] +// +// CHECK-RV64-O2-LABEL: @test_vleff_save_new_vl_to_cross_func_nullptr_m( +// CHECK-RV64-O2-NEXT: entry: +// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-O2-NEXT: [[TMP1:%.*]] = tail call { , i64 } @llvm.riscv.vleff.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-O2-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 +// CHECK-RV64-O2-NEXT: ret [[TMP2]] +// +vint8mf8_t 
test_vleff_save_new_vl_to_cross_func_nullptr_m(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, size_t vl) { + size_t *new_vl = func_return_nullptr(); + return vle8ff_v_i8mf8_m(mask, maskedoff, base, new_vl, vl); +} + +// CHECK-RV64-O0-LABEL: @test_vleff_save_new_vl_to_known_not_null( +// CHECK-RV64-O0-NEXT: entry: +// CHECK-RV64-O0-NEXT: [[NEW_VL:%.*]] = alloca i64, align 8 +// CHECK-RV64-O0-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-O0-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv1i8.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-O0-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 +// CHECK-RV64-O0-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL]], null +// CHECK-RV64-O0-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64-O0: newvl_store: +// CHECK-RV64-O0-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-O0-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-O0-NEXT: br label [[NEWVL_END]] +// CHECK-RV64-O0: newvl_end: +// CHECK-RV64-O0-NEXT: [[TMP5:%.*]] = load i64, i64* [[NEW_VL]], align 8 +// CHECK-RV64-O0-NEXT: store i64 [[TMP5]], i64* [[OUT_NEW_VL:%.*]], align 8 +// CHECK-RV64-O0-NEXT: ret [[TMP2]] +// +// CHECK-RV64-O2-LABEL: @test_vleff_save_new_vl_to_known_not_null( +// CHECK-RV64-O2-NEXT: entry: +// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-O2-NEXT: [[TMP1:%.*]] = tail call { , i64 } @llvm.riscv.vleff.nxv1i8.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-O2-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-O2-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 0 +// CHECK-RV64-O2-NEXT: store i64 [[TMP2]], i64* [[OUT_NEW_VL:%.*]], align 8 +// CHECK-RV64-O2-NEXT: ret [[TMP3]] +// +vint8mf8_t test_vleff_save_new_vl_to_known_not_null(const int8_t *base, size_t vl, size_t *out_new_vl) { + size_t new_vl; + vint8mf8_t v = vle8ff_v_i8mf8(base, &new_vl, vl); + *out_new_vl = new_vl; + return v; +} + +// CHECK-RV64-O0-LABEL: @test_vleff_save_new_vl_to_known_not_null_m( +// CHECK-RV64-O0-NEXT: entry: +// CHECK-RV64-O0-NEXT: [[NEW_VL:%.*]] = alloca i64, align 8 +// CHECK-RV64-O0-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-O0-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-O0-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 +// CHECK-RV64-O0-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL]], null +// CHECK-RV64-O0-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64-O0: newvl_store: +// CHECK-RV64-O0-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-O0-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-O0-NEXT: br label [[NEWVL_END]] +// CHECK-RV64-O0: newvl_end: +// CHECK-RV64-O0-NEXT: [[TMP5:%.*]] = load i64, i64* [[NEW_VL]], align 8 +// CHECK-RV64-O0-NEXT: store i64 [[TMP5]], i64* [[OUT_NEW_VL:%.*]], align 8 +// CHECK-RV64-O0-NEXT: ret [[TMP2]] +// +// CHECK-RV64-O2-LABEL: @test_vleff_save_new_vl_to_known_not_null_m( +// CHECK-RV64-O2-NEXT: entry: +// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-O2-NEXT: [[TMP1:%.*]] = tail call { , i64 } @llvm.riscv.vleff.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-O2-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-O2-NEXT: [[TMP3:%.*]] = extractvalue { , 
i64 } [[TMP1]], 0 +// CHECK-RV64-O2-NEXT: store i64 [[TMP2]], i64* [[OUT_NEW_VL:%.*]], align 8 +// CHECK-RV64-O2-NEXT: ret [[TMP3]] +// +vint8mf8_t test_vleff_save_new_vl_to_known_not_null_m(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, size_t vl, size_t *out_new_vl) { + size_t new_vl; + vint8mf8_t v = vle8ff_v_i8mf8_m(mask, maskedoff, base, &new_vl, vl); + *out_new_vl = new_vl; + return v; +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vleff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vleff.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vleff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vleff.c @@ -11,8 +11,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv1i8.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vint8mf8_t test_vle8ff_v_i8mf8 (const int8_t *base, size_t *new_vl, size_t vl) { @@ -24,8 +29,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv2i8.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vint8mf4_t test_vle8ff_v_i8mf4 (const int8_t *base, size_t *new_vl, size_t vl) { @@ -37,8 +47,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv4i8.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vint8mf2_t test_vle8ff_v_i8mf2 (const int8_t *base, size_t *new_vl, size_t vl) { @@ -50,8 +65,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } 
@llvm.riscv.vleff.nxv8i8.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vint8m1_t test_vle8ff_v_i8m1 (const int8_t *base, size_t *new_vl, size_t vl) { @@ -63,8 +83,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv16i8.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vint8m2_t test_vle8ff_v_i8m2 (const int8_t *base, size_t *new_vl, size_t vl) { @@ -76,8 +101,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv32i8.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vint8m4_t test_vle8ff_v_i8m4 (const int8_t *base, size_t *new_vl, size_t vl) { @@ -89,8 +119,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv64i8.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vint8m8_t test_vle8ff_v_i8m8 (const int8_t *base, size_t *new_vl, size_t vl) { @@ 
-102,8 +137,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv1i8.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vuint8mf8_t test_vle8ff_v_u8mf8 (const uint8_t *base, size_t *new_vl, size_t vl) { @@ -115,8 +155,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv2i8.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vuint8mf4_t test_vle8ff_v_u8mf4 (const uint8_t *base, size_t *new_vl, size_t vl) { @@ -128,8 +173,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv4i8.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vuint8mf2_t test_vle8ff_v_u8mf2 (const uint8_t *base, size_t *new_vl, size_t vl) { @@ -141,8 +191,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv8i8.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] 
+// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vuint8m1_t test_vle8ff_v_u8m1 (const uint8_t *base, size_t *new_vl, size_t vl) { @@ -154,8 +209,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv16i8.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vuint8m2_t test_vle8ff_v_u8m2 (const uint8_t *base, size_t *new_vl, size_t vl) { @@ -167,8 +227,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv32i8.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vuint8m4_t test_vle8ff_v_u8m4 (const uint8_t *base, size_t *new_vl, size_t vl) { @@ -180,8 +245,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv64i8.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vuint8m8_t test_vle8ff_v_u8m8 (const uint8_t *base, size_t *new_vl, size_t vl) { @@ -193,8 +263,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: 
newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vint8mf8_t test_vle8ff_v_i8mf8_m (vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, size_t *new_vl, size_t vl) { @@ -206,8 +281,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vint8mf4_t test_vle8ff_v_i8mf4_m (vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, size_t *new_vl, size_t vl) { @@ -219,8 +299,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vint8mf2_t test_vle8ff_v_i8mf2_m (vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, size_t *new_vl, size_t vl) { @@ -232,8 +317,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vint8m1_t test_vle8ff_v_i8m1_m (vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, size_t *new_vl, size_t vl) { @@ -245,8 +335,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv16i8.i64( 
[[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vint8m2_t test_vle8ff_v_i8m2_m (vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, size_t *new_vl, size_t vl) { @@ -258,8 +353,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vint8m4_t test_vle8ff_v_i8m4_m (vbool2_t mask, vint8m4_t maskedoff, const int8_t *base, size_t *new_vl, size_t vl) { @@ -271,8 +371,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv64i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vint8m8_t test_vle8ff_v_i8m8_m (vbool1_t mask, vint8m8_t maskedoff, const int8_t *base, size_t *new_vl, size_t vl) { @@ -284,8 +389,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: 
store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vuint8mf8_t test_vle8ff_v_u8mf8_m (vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, size_t *new_vl, size_t vl) { @@ -297,8 +407,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vuint8mf4_t test_vle8ff_v_u8mf4_m (vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, size_t *new_vl, size_t vl) { @@ -310,8 +425,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vuint8mf2_t test_vle8ff_v_u8mf2_m (vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, size_t *new_vl, size_t vl) { @@ -323,8 +443,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vuint8m1_t test_vle8ff_v_u8m1_m (vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, size_t *new_vl, size_t vl) { @@ -336,8 +461,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: 
[[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vuint8m2_t test_vle8ff_v_u8m2_m (vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, size_t *new_vl, size_t vl) { @@ -349,8 +479,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vuint8m4_t test_vle8ff_v_u8m4_m (vbool2_t mask, vuint8m4_t maskedoff, const uint8_t *base, size_t *new_vl, size_t vl) { @@ -362,8 +497,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv64i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vuint8m8_t test_vle8ff_v_u8m8_m (vbool1_t mask, vuint8m8_t maskedoff, const uint8_t *base, size_t *new_vl, size_t vl) { @@ -375,8 +515,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv1i16.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: 
// CHECK-RV64-NEXT: ret [[TMP2]] // vint16mf4_t test_vle16ff_v_i16mf4 (const int16_t *base, size_t *new_vl, size_t vl) { @@ -388,8 +533,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv2i16.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vint16mf2_t test_vle16ff_v_i16mf2 (const int16_t *base, size_t *new_vl, size_t vl) { @@ -401,8 +551,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv4i16.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vint16m1_t test_vle16ff_v_i16m1 (const int16_t *base, size_t *new_vl, size_t vl) { @@ -414,8 +569,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv8i16.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vint16m2_t test_vle16ff_v_i16m2 (const int16_t *base, size_t *new_vl, size_t vl) { @@ -427,8 +587,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv16i16.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = 
extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vint16m4_t test_vle16ff_v_i16m4 (const int16_t *base, size_t *new_vl, size_t vl) { @@ -440,8 +605,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv32i16.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vint16m8_t test_vle16ff_v_i16m8 (const int16_t *base, size_t *new_vl, size_t vl) { @@ -453,8 +623,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv1i16.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vuint16mf4_t test_vle16ff_v_u16mf4 (const uint16_t *base, size_t *new_vl, size_t vl) { @@ -466,8 +641,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv2i16.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vuint16mf2_t test_vle16ff_v_u16mf2 (const uint16_t *base, size_t *new_vl, size_t vl) { @@ -479,8 +659,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv4i16.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* 
[[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vuint16m1_t test_vle16ff_v_u16m1 (const uint16_t *base, size_t *new_vl, size_t vl) { @@ -492,8 +677,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv8i16.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vuint16m2_t test_vle16ff_v_u16m2 (const uint16_t *base, size_t *new_vl, size_t vl) { @@ -505,8 +695,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv16i16.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vuint16m4_t test_vle16ff_v_u16m4 (const uint16_t *base, size_t *new_vl, size_t vl) { @@ -518,8 +713,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv32i16.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vuint16m8_t test_vle16ff_v_u16m8 (const uint16_t *base, size_t *new_vl, size_t vl) { @@ -531,8 +731,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , 
i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vint16mf4_t test_vle16ff_v_i16mf4_m (vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, size_t *new_vl, size_t vl) { @@ -544,8 +749,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vint16mf2_t test_vle16ff_v_i16mf2_m (vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, size_t *new_vl, size_t vl) { @@ -557,8 +767,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vint16m1_t test_vle16ff_v_i16m1_m (vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, size_t *new_vl, size_t vl) { @@ -570,8 +785,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// 
CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vint16m2_t test_vle16ff_v_i16m2_m (vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, size_t *new_vl, size_t vl) { @@ -583,8 +803,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vint16m4_t test_vle16ff_v_i16m4_m (vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, size_t *new_vl, size_t vl) { @@ -596,8 +821,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vint16m8_t test_vle16ff_v_i16m8_m (vbool2_t mask, vint16m8_t maskedoff, const int16_t *base, size_t *new_vl, size_t vl) { @@ -609,8 +839,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vuint16mf4_t test_vle16ff_v_u16mf4_m (vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, size_t *new_vl, size_t vl) { @@ -622,8 +857,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = 
extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vuint16mf2_t test_vle16ff_v_u16mf2_m (vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, size_t *new_vl, size_t vl) { @@ -635,8 +875,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vuint16m1_t test_vle16ff_v_u16m1_m (vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, size_t *new_vl, size_t vl) { @@ -648,8 +893,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vuint16m2_t test_vle16ff_v_u16m2_m (vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, size_t *new_vl, size_t vl) { @@ -661,8 +911,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret 
[[TMP2]] // vuint16m4_t test_vle16ff_v_u16m4_m (vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, size_t *new_vl, size_t vl) { @@ -674,8 +929,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vuint16m8_t test_vle16ff_v_u16m8_m (vbool2_t mask, vuint16m8_t maskedoff, const uint16_t *base, size_t *new_vl, size_t vl) { @@ -687,8 +947,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv1i32.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vint32mf2_t test_vle32ff_v_i32mf2 (const int32_t *base, size_t *new_vl, size_t vl) { @@ -700,8 +965,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv2i32.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vint32m1_t test_vle32ff_v_i32m1 (const int32_t *base, size_t *new_vl, size_t vl) { @@ -713,8 +983,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv4i32.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], 
label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vint32m2_t test_vle32ff_v_i32m2 (const int32_t *base, size_t *new_vl, size_t vl) { @@ -726,8 +1001,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv8i32.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vint32m4_t test_vle32ff_v_i32m4 (const int32_t *base, size_t *new_vl, size_t vl) { @@ -739,8 +1019,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv16i32.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vint32m8_t test_vle32ff_v_i32m8 (const int32_t *base, size_t *new_vl, size_t vl) { @@ -752,8 +1037,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv1i32.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vuint32mf2_t test_vle32ff_v_u32mf2 (const uint32_t *base, size_t *new_vl, size_t vl) { @@ -765,8 +1055,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv2i32.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 
[[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vuint32m1_t test_vle32ff_v_u32m1 (const uint32_t *base, size_t *new_vl, size_t vl) { @@ -778,8 +1073,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv4i32.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vuint32m2_t test_vle32ff_v_u32m2 (const uint32_t *base, size_t *new_vl, size_t vl) { @@ -791,8 +1091,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv8i32.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vuint32m4_t test_vle32ff_v_u32m4 (const uint32_t *base, size_t *new_vl, size_t vl) { @@ -804,8 +1109,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv16i32.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vuint32m8_t test_vle32ff_v_u32m8 (const uint32_t *base, size_t *new_vl, size_t vl) { @@ -817,8 +1127,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv1f32.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) 
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vfloat32mf2_t test_vle32ff_v_f32mf2 (const float *base, size_t *new_vl, size_t vl) { @@ -830,8 +1145,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv2f32.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vfloat32m1_t test_vle32ff_v_f32m1 (const float *base, size_t *new_vl, size_t vl) { @@ -843,8 +1163,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv4f32.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vfloat32m2_t test_vle32ff_v_f32m2 (const float *base, size_t *new_vl, size_t vl) { @@ -856,8 +1181,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv8f32.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vfloat32m4_t test_vle32ff_v_f32m4 (const float *base, size_t *new_vl, size_t vl) { @@ -869,8 +1199,13 @@ // 
CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv16f32.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vfloat32m8_t test_vle32ff_v_f32m8 (const float *base, size_t *new_vl, size_t vl) { @@ -882,8 +1217,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vint32mf2_t test_vle32ff_v_i32mf2_m (vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, size_t *new_vl, size_t vl) { @@ -895,8 +1235,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vint32m1_t test_vle32ff_v_i32m1_m (vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, size_t *new_vl, size_t vl) { @@ -908,8 +1253,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: 
newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vint32m2_t test_vle32ff_v_i32m2_m (vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, size_t *new_vl, size_t vl) { @@ -921,8 +1271,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vint32m4_t test_vle32ff_v_i32m4_m (vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, size_t *new_vl, size_t vl) { @@ -934,8 +1289,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vint32m8_t test_vle32ff_v_i32m8_m (vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, size_t *new_vl, size_t vl) { @@ -947,8 +1307,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vuint32mf2_t test_vle32ff_v_u32mf2_m (vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, size_t *new_vl, size_t vl) { @@ -960,8 +1325,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } 
@llvm.riscv.vleff.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vuint32m1_t test_vle32ff_v_u32m1_m (vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, size_t *new_vl, size_t vl) { @@ -973,8 +1343,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vuint32m2_t test_vle32ff_v_u32m2_m (vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, size_t *new_vl, size_t vl) { @@ -986,8 +1361,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vuint32m4_t test_vle32ff_v_u32m4_m (vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, size_t *new_vl, size_t vl) { @@ -999,8 +1379,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: 
[[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vuint32m8_t test_vle32ff_v_u32m8_m (vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, size_t *new_vl, size_t vl) { @@ -1012,8 +1397,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv1f32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vfloat32mf2_t test_vle32ff_v_f32mf2_m (vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, size_t *new_vl, size_t vl) { @@ -1025,8 +1415,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv2f32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vfloat32m1_t test_vle32ff_v_f32m1_m (vbool32_t mask, vfloat32m1_t maskedoff, const float *base, size_t *new_vl, size_t vl) { @@ -1038,8 +1433,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv4f32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vfloat32m2_t test_vle32ff_v_f32m2_m (vbool16_t mask, vfloat32m2_t maskedoff, const float *base, size_t *new_vl, size_t vl) { @@ -1051,8 +1451,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } 
@llvm.riscv.vleff.mask.nxv8f32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vfloat32m4_t test_vle32ff_v_f32m4_m (vbool8_t mask, vfloat32m4_t maskedoff, const float *base, size_t *new_vl, size_t vl) { @@ -1064,8 +1469,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv16f32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vfloat32m8_t test_vle32ff_v_f32m8_m (vbool4_t mask, vfloat32m8_t maskedoff, const float *base, size_t *new_vl, size_t vl) { @@ -1077,8 +1487,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv1i64.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vint64m1_t test_vle64ff_v_i64m1 (const int64_t *base, size_t *new_vl, size_t vl) { @@ -1090,8 +1505,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv2i64.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 
+// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vint64m2_t test_vle64ff_v_i64m2 (const int64_t *base, size_t *new_vl, size_t vl) { @@ -1103,8 +1523,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv4i64.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vint64m4_t test_vle64ff_v_i64m4 (const int64_t *base, size_t *new_vl, size_t vl) { @@ -1116,8 +1541,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv8i64.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vint64m8_t test_vle64ff_v_i64m8 (const int64_t *base, size_t *new_vl, size_t vl) { @@ -1129,8 +1559,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv1i64.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vuint64m1_t test_vle64ff_v_u64m1 (const uint64_t *base, size_t *new_vl, size_t vl) { @@ -1142,8 +1577,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv2i64.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label 
[[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vuint64m2_t test_vle64ff_v_u64m2 (const uint64_t *base, size_t *new_vl, size_t vl) { @@ -1155,8 +1595,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv4i64.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vuint64m4_t test_vle64ff_v_u64m4 (const uint64_t *base, size_t *new_vl, size_t vl) { @@ -1168,8 +1613,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv8i64.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vuint64m8_t test_vle64ff_v_u64m8 (const uint64_t *base, size_t *new_vl, size_t vl) { @@ -1181,8 +1631,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv1f64.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vfloat64m1_t test_vle64ff_v_f64m1 (const double *base, size_t *new_vl, size_t vl) { @@ -1194,8 +1649,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv2f64.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 
[[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vfloat64m2_t test_vle64ff_v_f64m2 (const double *base, size_t *new_vl, size_t vl) { @@ -1207,8 +1667,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv4f64.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vfloat64m4_t test_vle64ff_v_f64m4 (const double *base, size_t *new_vl, size_t vl) { @@ -1220,8 +1685,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv8f64.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vfloat64m8_t test_vle64ff_v_f64m8 (const double *base, size_t *new_vl, size_t vl) { @@ -1233,8 +1703,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vint64m1_t test_vle64ff_v_i64m1_m (vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, size_t *new_vl, size_t vl) { @@ -1246,8 +1721,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = 
call { , i64 } @llvm.riscv.vleff.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vint64m2_t test_vle64ff_v_i64m2_m (vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, size_t *new_vl, size_t vl) { @@ -1259,8 +1739,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vint64m4_t test_vle64ff_v_i64m4_m (vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, size_t *new_vl, size_t vl) { @@ -1272,8 +1757,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vint64m8_t test_vle64ff_v_i64m8_m (vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, size_t *new_vl, size_t vl) { @@ -1285,8 +1775,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// 
CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vuint64m1_t test_vle64ff_v_u64m1_m (vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, size_t *new_vl, size_t vl) { @@ -1298,8 +1793,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vuint64m2_t test_vle64ff_v_u64m2_m (vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, size_t *new_vl, size_t vl) { @@ -1311,8 +1811,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vuint64m4_t test_vle64ff_v_u64m4_m (vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, size_t *new_vl, size_t vl) { @@ -1324,8 +1829,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vuint64m8_t test_vle64ff_v_u64m8_m (vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, size_t *new_vl, size_t vl) { @@ -1337,8 +1847,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } 
@llvm.riscv.vleff.mask.nxv1f64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vfloat64m1_t test_vle64ff_v_f64m1_m (vbool64_t mask, vfloat64m1_t maskedoff, const double *base, size_t *new_vl, size_t vl) { @@ -1350,8 +1865,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv2f64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vfloat64m2_t test_vle64ff_v_f64m2_m (vbool32_t mask, vfloat64m2_t maskedoff, const double *base, size_t *new_vl, size_t vl) { @@ -1363,8 +1883,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv4f64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vfloat64m4_t test_vle64ff_v_f64m4_m (vbool16_t mask, vfloat64m4_t maskedoff, const double *base, size_t *new_vl, size_t vl) { @@ -1376,8 +1901,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv8f64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// 
CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vfloat64m8_t test_vle64ff_v_f64m8_m (vbool8_t mask, vfloat64m8_t maskedoff, const double *base, size_t *new_vl, size_t vl) { @@ -1389,8 +1919,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv1f16.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vfloat16mf4_t test_vle16ff_v_f16mf4 (const _Float16 *base, size_t *new_vl, size_t vl) { @@ -1402,8 +1937,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv2f16.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vfloat16mf2_t test_vle16ff_v_f16mf2 (const _Float16 *base, size_t *new_vl, size_t vl) { @@ -1415,8 +1955,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv4f16.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vfloat16m1_t test_vle16ff_v_f16m1 (const _Float16 *base, size_t *new_vl, size_t vl) { @@ -1428,8 +1973,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv8f16.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 
[[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vfloat16m2_t test_vle16ff_v_f16m2 (const _Float16 *base, size_t *new_vl, size_t vl) { @@ -1441,8 +1991,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv16f16.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vfloat16m4_t test_vle16ff_v_f16m4 (const _Float16 *base, size_t *new_vl, size_t vl) { @@ -1454,8 +2009,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv32f16.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vfloat16m8_t test_vle16ff_v_f16m8 (const _Float16 *base, size_t *new_vl, size_t vl) { @@ -1467,8 +2027,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv1f16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vfloat16mf4_t test_vle16ff_v_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, const _Float16 *base, size_t *new_vl, size_t vl) { @@ -1480,8 +2045,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * // CHECK-RV64-NEXT: 
[[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv2f16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vfloat16mf2_t test_vle16ff_v_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, const _Float16 *base, size_t *new_vl, size_t vl) { @@ -1493,8 +2063,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv4f16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vfloat16m1_t test_vle16ff_v_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, const _Float16 *base, size_t *new_vl, size_t vl) { @@ -1506,8 +2081,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv8f16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vfloat16m2_t test_vle16ff_v_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, const _Float16 *base, size_t *new_vl, size_t vl) { @@ -1519,8 +2099,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv16f16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// 
CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vfloat16m4_t test_vle16ff_v_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, const _Float16 *base, size_t *new_vl, size_t vl) { @@ -1532,8 +2117,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv32f16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vfloat16m8_t test_vle16ff_v_f16m8_m (vbool2_t mask, vfloat16m8_t maskedoff, const _Float16 *base, size_t *new_vl, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vlsegff-optimized.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vlsegff-optimized.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vlsegff-optimized.c @@ -0,0 +1,344 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -no-opaque-pointers -triple riscv64 -target-feature +f -target-feature +d \ +// RUN: -target-feature +v -target-feature +zfh -target-feature +experimental-zvfh \ +// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64-O0 %s +// RUN: %clang_cc1 -no-opaque-pointers -triple riscv64 -target-feature +f -target-feature +d \ +// RUN: -target-feature +v -target-feature +zfh -target-feature +experimental-zvfh \ +// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -O2 | FileCheck --check-prefix=CHECK-RV64-O2 %s + +#include + +// CHECK-RV64-O0-LABEL: @func_return_nullptr( +// CHECK-RV64-O0-NEXT: entry: +// CHECK-RV64-O0-NEXT: ret i64* null +// +// CHECK-RV64-O2-LABEL: @func_return_nullptr( +// CHECK-RV64-O2-NEXT: entry: +// CHECK-RV64-O2-NEXT: ret i64* null +// +size_t *__attribute__((noinline)) func_return_nullptr() { + return NULL; +} + +// CHECK-RV64-O0-LABEL: @test_vlsegff_save_new_vl_to_not_null( +// CHECK-RV64-O0-NEXT: entry: +// CHECK-RV64-O0-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv1i8.i64( undef, undef, i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-O0-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-O0-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-O0-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-O0-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-O0-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-O0-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64-O0: newvl_store: +// CHECK-RV64-O0-NEXT: [[TMP4:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-O0-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 1 +// 
CHECK-RV64-O0-NEXT: br label [[NEWVL_END]] +// CHECK-RV64-O0: newvl_end: +// CHECK-RV64-O0-NEXT: ret void +// +// CHECK-RV64-O2-LABEL: @test_vlsegff_save_new_vl_to_not_null( +// CHECK-RV64-O2-NEXT: entry: +// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call { , , i64 } @llvm.riscv.vlseg2ff.nxv1i8.i64( undef, undef, i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-O2-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-O2-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-O2-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-O2-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-O2-NEXT: [[DOTNOT:%.*]] = icmp eq i64* [[NEW_VL:%.*]], null +// CHECK-RV64-O2-NEXT: br i1 [[DOTNOT]], label [[NEWVL_END:%.*]], label [[NEWVL_STORE:%.*]] +// CHECK-RV64-O2: newvl_store: +// CHECK-RV64-O2-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-O2-NEXT: store i64 [[TMP3]], i64* [[NEW_VL]], align 1 +// CHECK-RV64-O2-NEXT: br label [[NEWVL_END]] +// CHECK-RV64-O2: newvl_end: +// CHECK-RV64-O2-NEXT: ret void +// +void test_vlsegff_save_new_vl_to_not_null(vint8mf8_t *v0, vint8mf8_t *v1, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg2e8ff_v_i8mf8(v0, v1, base, new_vl, vl); +} + +// CHECK-RV64-O0-LABEL: @test_vlsegff_save_new_vl_to_not_null_m( +// CHECK-RV64-O0-NEXT: entry: +// CHECK-RV64-O0-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-O0-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-O0-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-O0-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-O0-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-O0-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-O0-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64-O0: newvl_store: +// CHECK-RV64-O0-NEXT: [[TMP4:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-O0-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 1 +// CHECK-RV64-O0-NEXT: br label [[NEWVL_END]] +// CHECK-RV64-O0: newvl_end: +// CHECK-RV64-O0-NEXT: ret void +// +// CHECK-RV64-O2-LABEL: @test_vlsegff_save_new_vl_to_not_null_m( +// CHECK-RV64-O2-NEXT: entry: +// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-O2-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-O2-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-O2-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-O2-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-O2-NEXT: [[DOTNOT:%.*]] = icmp eq i64* [[NEW_VL:%.*]], null +// CHECK-RV64-O2-NEXT: br i1 [[DOTNOT]], label [[NEWVL_END:%.*]], label [[NEWVL_STORE:%.*]] +// CHECK-RV64-O2: newvl_store: +// CHECK-RV64-O2-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-O2-NEXT: store i64 [[TMP3]], i64* [[NEW_VL]], align 1 +// CHECK-RV64-O2-NEXT: br label [[NEWVL_END]] +// CHECK-RV64-O2: newvl_end: +// CHECK-RV64-O2-NEXT: ret void +// +void test_vlsegff_save_new_vl_to_not_null_m(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg2e8ff_v_i8mf8_m(v0, v1, mask, maskedoff0, 
maskedoff1, base, new_vl, vl); +} + +// CHECK-RV64-O0-LABEL: @test_vlsegff_save_new_vl_to_direct_nullptr( +// CHECK-RV64-O0-NEXT: entry: +// CHECK-RV64-O0-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv1i8.i64( undef, undef, i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-O0-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-O0-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-O0-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-O0-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-O0-NEXT: br i1 false, label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64-O0: newvl_store: +// CHECK-RV64-O0-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-O0-NEXT: store i64 [[TMP3]], i64* null, align 1 +// CHECK-RV64-O0-NEXT: br label [[NEWVL_END]] +// CHECK-RV64-O0: newvl_end: +// CHECK-RV64-O0-NEXT: ret void +// +// CHECK-RV64-O2-LABEL: @test_vlsegff_save_new_vl_to_direct_nullptr( +// CHECK-RV64-O2-NEXT: entry: +// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call { , , i64 } @llvm.riscv.vlseg2ff.nxv1i8.i64( undef, undef, i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-O2-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-O2-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-O2-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-O2-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-O2-NEXT: ret void +// +void test_vlsegff_save_new_vl_to_direct_nullptr(vint8mf8_t *v0, vint8mf8_t *v1, const int8_t *base, size_t vl) { + return vlseg2e8ff_v_i8mf8(v0, v1, base, NULL, vl); +} + +// CHECK-RV64-O0-LABEL: @test_vlsegff_save_new_vl_to_direct_nullptr_m( +// CHECK-RV64-O0-NEXT: entry: +// CHECK-RV64-O0-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-O0-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-O0-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-O0-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-O0-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-O0-NEXT: br i1 false, label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64-O0: newvl_store: +// CHECK-RV64-O0-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-O0-NEXT: store i64 [[TMP3]], i64* null, align 1 +// CHECK-RV64-O0-NEXT: br label [[NEWVL_END]] +// CHECK-RV64-O0: newvl_end: +// CHECK-RV64-O0-NEXT: ret void +// +// CHECK-RV64-O2-LABEL: @test_vlsegff_save_new_vl_to_direct_nullptr_m( +// CHECK-RV64-O2-NEXT: entry: +// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-O2-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-O2-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-O2-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-O2-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-O2-NEXT: ret void +// +void test_vlsegff_save_new_vl_to_direct_nullptr_m(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, size_t vl) { + return vlseg2e8ff_v_i8mf8_m(v0, v1, mask, maskedoff0, maskedoff1, base, NULL, vl); +} + +// CHECK-RV64-O0-LABEL: @test_vlsegff_save_new_vl_to_indirect_nullptr( +// 
CHECK-RV64-O0-NEXT: entry: +// CHECK-RV64-O0-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv1i8.i64( undef, undef, i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-O0-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-O0-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-O0-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-O0-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-O0-NEXT: [[TMP3:%.*]] = icmp ne i64* null, null +// CHECK-RV64-O0-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64-O0: newvl_store: +// CHECK-RV64-O0-NEXT: [[TMP4:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-O0-NEXT: store i64 [[TMP4]], i64* null, align 1 +// CHECK-RV64-O0-NEXT: br label [[NEWVL_END]] +// CHECK-RV64-O0: newvl_end: +// CHECK-RV64-O0-NEXT: ret void +// +// CHECK-RV64-O2-LABEL: @test_vlsegff_save_new_vl_to_indirect_nullptr( +// CHECK-RV64-O2-NEXT: entry: +// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call { , , i64 } @llvm.riscv.vlseg2ff.nxv1i8.i64( undef, undef, i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-O2-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-O2-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-O2-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-O2-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-O2-NEXT: ret void +// +void test_vlsegff_save_new_vl_to_indirect_nullptr(vint8mf8_t *v0, vint8mf8_t *v1, const int8_t *base, size_t vl) { + size_t *new_vl = NULL; + return vlseg2e8ff_v_i8mf8(v0, v1, base, new_vl, vl); +} + +// CHECK-RV64-O0-LABEL: @test_vlsegff_save_new_vl_to_indirect_nullptr_m( +// CHECK-RV64-O0-NEXT: entry: +// CHECK-RV64-O0-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-O0-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-O0-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-O0-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-O0-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-O0-NEXT: [[TMP3:%.*]] = icmp ne i64* null, null +// CHECK-RV64-O0-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64-O0: newvl_store: +// CHECK-RV64-O0-NEXT: [[TMP4:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-O0-NEXT: store i64 [[TMP4]], i64* null, align 1 +// CHECK-RV64-O0-NEXT: br label [[NEWVL_END]] +// CHECK-RV64-O0: newvl_end: +// CHECK-RV64-O0-NEXT: ret void +// +// CHECK-RV64-O2-LABEL: @test_vlsegff_save_new_vl_to_indirect_nullptr_m( +// CHECK-RV64-O2-NEXT: entry: +// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-O2-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-O2-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-O2-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-O2-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-O2-NEXT: ret void +// +void test_vlsegff_save_new_vl_to_indirect_nullptr_m(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, size_t vl) { + size_t *new_vl = NULL; + return vlseg2e8ff_v_i8mf8_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +} 
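For reference, a minimal caller-side sketch of the behavior these tests pin down. This is not part of the patch; the helper name load_prefix is an illustrative assumption, and only the vle8ff_v_i8mf8 intrinsic call reflects the documented interface. With the guarded codegen, a caller may pass NULL for new_vl: the newvl_store block is skipped at runtime, and when the optimizer can prove the pointer is null (as in the tests above) the branch, the extractvalue, and the store fold away entirely at -O2.

#include <riscv_vector.h>
#include <stddef.h>

// Illustrative helper, not taken from the patch: fault-only-first load of up
// to vl bytes. Callers that do not need the faulting element count can pass
// new_vl == NULL; the guarded IR above no longer stores through a null pointer.
static vint8mf8_t load_prefix(const int8_t *base, size_t *new_vl, size_t vl) {
  return vle8ff_v_i8mf8(base, new_vl, vl);
}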
+ +// CHECK-RV64-O0-LABEL: @test_vlsegff_save_new_vl_to_cross_func_nullptr( +// CHECK-RV64-O0-NEXT: entry: +// CHECK-RV64-O0-NEXT: [[CALL:%.*]] = call i64* @func_return_nullptr() +// CHECK-RV64-O0-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv1i8.i64( undef, undef, i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-O0-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-O0-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-O0-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-O0-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-O0-NEXT: [[TMP3:%.*]] = icmp ne i64* [[CALL]], null +// CHECK-RV64-O0-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64-O0: newvl_store: +// CHECK-RV64-O0-NEXT: [[TMP4:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-O0-NEXT: store i64 [[TMP4]], i64* [[CALL]], align 1 +// CHECK-RV64-O0-NEXT: br label [[NEWVL_END]] +// CHECK-RV64-O0: newvl_end: +// CHECK-RV64-O0-NEXT: ret void +// +// CHECK-RV64-O2-LABEL: @test_vlsegff_save_new_vl_to_cross_func_nullptr( +// CHECK-RV64-O2-NEXT: entry: +// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call { , , i64 } @llvm.riscv.vlseg2ff.nxv1i8.i64( undef, undef, i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-O2-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-O2-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-O2-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-O2-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-O2-NEXT: ret void +// +void test_vlsegff_save_new_vl_to_cross_func_nullptr(vint8mf8_t *v0, vint8mf8_t *v1, const int8_t *base, size_t vl) { + size_t *new_vl = func_return_nullptr(); + return vlseg2e8ff_v_i8mf8(v0, v1, base, new_vl, vl); +} + +// CHECK-RV64-O0-LABEL: @test_vlsegff_save_new_vl_to_cross_func_nullptr_m( +// CHECK-RV64-O0-NEXT: entry: +// CHECK-RV64-O0-NEXT: [[CALL:%.*]] = call i64* @func_return_nullptr() +// CHECK-RV64-O0-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-O0-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-O0-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-O0-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-O0-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-O0-NEXT: [[TMP3:%.*]] = icmp ne i64* [[CALL]], null +// CHECK-RV64-O0-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64-O0: newvl_store: +// CHECK-RV64-O0-NEXT: [[TMP4:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-O0-NEXT: store i64 [[TMP4]], i64* [[CALL]], align 1 +// CHECK-RV64-O0-NEXT: br label [[NEWVL_END]] +// CHECK-RV64-O0: newvl_end: +// CHECK-RV64-O0-NEXT: ret void +// +// CHECK-RV64-O2-LABEL: @test_vlsegff_save_new_vl_to_cross_func_nullptr_m( +// CHECK-RV64-O2-NEXT: entry: +// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-O2-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-O2-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-O2-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-O2-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-O2-NEXT: ret void +// +void 
test_vlsegff_save_new_vl_to_cross_func_nullptr_m(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, size_t vl) { + size_t *new_vl = func_return_nullptr(); + return vlseg2e8ff_v_i8mf8_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +} + +// CHECK-RV64-O0-LABEL: @test_vlsegff_save_new_vl_to_known_not_null( +// CHECK-RV64-O0-NEXT: entry: +// CHECK-RV64-O0-NEXT: [[NEW_VL:%.*]] = alloca i64, align 8 +// CHECK-RV64-O0-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv1i8.i64( undef, undef, i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-O0-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-O0-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-O0-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-O0-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-O0-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL]], null +// CHECK-RV64-O0-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64-O0: newvl_store: +// CHECK-RV64-O0-NEXT: [[TMP4:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-O0-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 1 +// CHECK-RV64-O0-NEXT: br label [[NEWVL_END]] +// CHECK-RV64-O0: newvl_end: +// CHECK-RV64-O0-NEXT: [[TMP5:%.*]] = load i64, i64* [[NEW_VL]], align 8 +// CHECK-RV64-O0-NEXT: store i64 [[TMP5]], i64* [[OUT_NEW_VL:%.*]], align 8 +// CHECK-RV64-O0-NEXT: ret void +// +// CHECK-RV64-O2-LABEL: @test_vlsegff_save_new_vl_to_known_not_null( +// CHECK-RV64-O2-NEXT: entry: +// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call { , , i64 } @llvm.riscv.vlseg2ff.nxv1i8.i64( undef, undef, i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-O2-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-O2-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-O2-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-O2-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-O2-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-O2-NEXT: store i64 [[TMP3]], i64* [[OUT_NEW_VL:%.*]], align 8 +// CHECK-RV64-O2-NEXT: ret void +// +void test_vlsegff_save_new_vl_to_known_not_null(vint8mf8_t *v0, vint8mf8_t *v1, const int8_t *base, size_t vl, size_t *out_new_vl) { + size_t new_vl; + vlseg2e8ff_v_i8mf8(v0, v1, base, &new_vl, vl); + *out_new_vl = new_vl; +} + +// CHECK-RV64-O0-LABEL: @test_vlsegff_save_new_vl_to_known_not_null_m( +// CHECK-RV64-O0-NEXT: entry: +// CHECK-RV64-O0-NEXT: [[NEW_VL:%.*]] = alloca i64, align 8 +// CHECK-RV64-O0-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-O0-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-O0-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-O0-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-O0-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-O0-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL]], null +// CHECK-RV64-O0-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64-O0: newvl_store: +// CHECK-RV64-O0-NEXT: [[TMP4:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-O0-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 1 +// CHECK-RV64-O0-NEXT: br label [[NEWVL_END]] +// CHECK-RV64-O0: newvl_end: +// CHECK-RV64-O0-NEXT: [[TMP5:%.*]] = load i64, i64* [[NEW_VL]], align 8 +// 
CHECK-RV64-O0-NEXT: store i64 [[TMP5]], i64* [[OUT_NEW_VL:%.*]], align 8 +// CHECK-RV64-O0-NEXT: ret void +// +// CHECK-RV64-O2-LABEL: @test_vlsegff_save_new_vl_to_known_not_null_m( +// CHECK-RV64-O2-NEXT: entry: +// CHECK-RV64-O2-NEXT: [[TMP0:%.*]] = tail call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-O2-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-O2-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-O2-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-O2-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-O2-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-O2-NEXT: store i64 [[TMP3]], i64* [[OUT_NEW_VL:%.*]], align 8 +// CHECK-RV64-O2-NEXT: ret void +// +void test_vlsegff_save_new_vl_to_known_not_null_m(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, size_t vl, size_t *out_new_vl) { + size_t new_vl; + vlseg2e8ff_v_i8mf8_m(v0, v1, mask, maskedoff0, maskedoff1, base, &new_vl, vl); + *out_new_vl = new_vl; +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vlsegff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vlsegff.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vlsegff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vlsegff.c @@ -20,8 +20,13 @@ // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 1 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL]], align 1 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg2e8ff_v_i8mf8( @@ -31,8 +36,13 @@ // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 1 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg2e8ff_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, const int8_t *base, size_t *new_vl, size_t vl) { @@ -48,8 +58,13 @@ // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 // CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 1 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = icmp ne i32* 
[[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP4]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL]], align 1 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg3e8ff_v_i8mf8( @@ -61,8 +76,13 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP4]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL]], align 1 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg3e8ff_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, const int8_t *base, size_t *new_vl, size_t vl) { @@ -80,8 +100,13 @@ // CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 // CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 1 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP5]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL]], align 1 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg4e8ff_v_i8mf8( @@ -95,8 +120,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP5]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL]], align 1 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg4e8ff_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, const int8_t *base, size_t *new_vl, size_t vl) { @@ -116,8 +146,13 @@ // CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 4 // CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 1 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// 
CHECK-RV32-NEXT: br i1 [[TMP6]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL]], align 1 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg5e8ff_v_i8mf8( @@ -133,8 +168,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 -// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP6]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL]], align 1 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg5e8ff_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, const int8_t *base, size_t *new_vl, size_t vl) { @@ -156,8 +196,13 @@ // CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 5 // CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 -// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 1 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP7]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL]], align 1 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg6e8ff_v_i8mf8( @@ -175,8 +220,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 -// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP7]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL]], align 1 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg6e8ff_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, const int8_t *base, size_t *new_vl, size_t vl) { @@ -200,8 +250,13 @@ // CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 6 // CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 -// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* 
[[NEW_VL:%.*]], align 1 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP8]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 +// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL]], align 1 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg7e8ff_v_i8mf8( @@ -221,8 +276,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 -// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP8]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL]], align 1 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg7e8ff_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, const int8_t *base, size_t *new_vl, size_t vl) { @@ -248,8 +308,13 @@ // CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 7 // CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 -// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 1 +// CHECK-RV32-NEXT: [[TMP9:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP9]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 +// CHECK-RV32-NEXT: store i32 [[TMP10]], i32* [[NEW_VL]], align 1 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg8e8ff_v_i8mf8( @@ -271,8 +336,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 -// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP9]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP10]], i64* [[NEW_VL]], align 1 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg8e8ff_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, const int8_t *base, size_t *new_vl, size_t vl) { @@ -286,8 +356,13 @@ // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 
// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 1 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL]], align 1 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg2e8ff_v_i8mf4( @@ -297,8 +372,13 @@ // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 1 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg2e8ff_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, const int8_t *base, size_t *new_vl, size_t vl) { @@ -314,8 +394,13 @@ // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 // CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 1 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP4]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL]], align 1 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg3e8ff_v_i8mf4( @@ -327,8 +412,13 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP4]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL]], align 1 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg3e8ff_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, const int8_t *base, size_t *new_vl, size_t vl) { @@ -346,8 +436,13 @@ // CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 // CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: 
[[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 1 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP5]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL]], align 1 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg4e8ff_v_i8mf4( @@ -361,8 +456,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP5]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL]], align 1 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg4e8ff_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, const int8_t *base, size_t *new_vl, size_t vl) { @@ -382,8 +482,13 @@ // CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 4 // CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 1 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP6]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL]], align 1 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg5e8ff_v_i8mf4( @@ -399,8 +504,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 -// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP6]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL]], align 1 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg5e8ff_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, const int8_t *base, size_t *new_vl, size_t vl) { @@ -422,8 +532,13 @@ // CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 5 // CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], 
align 1 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 -// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 1 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP7]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL]], align 1 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg6e8ff_v_i8mf4( @@ -441,8 +556,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 -// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP7]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL]], align 1 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg6e8ff_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, const int8_t *base, size_t *new_vl, size_t vl) { @@ -466,8 +586,13 @@ // CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 6 // CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 -// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 1 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP8]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 +// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL]], align 1 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg7e8ff_v_i8mf4( @@ -487,8 +612,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 -// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP8]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL]], align 1 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg7e8ff_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, const int8_t *base, size_t *new_vl, size_t vl) { @@ -514,8 +644,13 @@ // CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], 
align 1 // CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 7 // CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 -// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 1 +// CHECK-RV32-NEXT: [[TMP9:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP9]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 +// CHECK-RV32-NEXT: store i32 [[TMP10]], i32* [[NEW_VL]], align 1 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg8e8ff_v_i8mf4( @@ -537,8 +672,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 -// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP9]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP10]], i64* [[NEW_VL]], align 1 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg8e8ff_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, const int8_t *base, size_t *new_vl, size_t vl) { @@ -552,8 +692,13 @@ // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 1 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL]], align 1 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg2e8ff_v_i8mf2( @@ -563,8 +708,13 @@ // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 1 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg2e8ff_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, const int8_t *base, size_t *new_vl, size_t vl) { @@ -580,8 
+730,13 @@ // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 // CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 1 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP4]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL]], align 1 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg3e8ff_v_i8mf2( @@ -593,8 +748,13 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP4]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL]], align 1 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg3e8ff_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, const int8_t *base, size_t *new_vl, size_t vl) { @@ -612,8 +772,13 @@ // CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 // CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 1 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP5]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL]], align 1 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg4e8ff_v_i8mf2( @@ -627,8 +792,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP5]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL]], align 1 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg4e8ff_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, const int8_t *base, size_t *new_vl, size_t vl) { @@ -648,8 +818,13 @@ // CHECK-RV32-NEXT: 
store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 4 // CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 1 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP6]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL]], align 1 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg5e8ff_v_i8mf2( @@ -665,8 +840,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 -// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP6]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL]], align 1 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg5e8ff_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, const int8_t *base, size_t *new_vl, size_t vl) { @@ -688,8 +868,13 @@ // CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 5 // CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 -// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 1 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP7]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL]], align 1 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg6e8ff_v_i8mf2( @@ -707,8 +892,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 -// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP7]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL]], align 1 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg6e8ff_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, const 
int8_t *base, size_t *new_vl, size_t vl) { @@ -732,8 +922,13 @@ // CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 6 // CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 -// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 1 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP8]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 +// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL]], align 1 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg7e8ff_v_i8mf2( @@ -753,8 +948,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 -// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP8]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL]], align 1 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg7e8ff_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, const int8_t *base, size_t *new_vl, size_t vl) { @@ -780,8 +980,13 @@ // CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 7 // CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 -// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 1 +// CHECK-RV32-NEXT: [[TMP9:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP9]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 +// CHECK-RV32-NEXT: store i32 [[TMP10]], i32* [[NEW_VL]], align 1 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg8e8ff_v_i8mf2( @@ -803,8 +1008,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 -// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP9]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP10]], i64* [[NEW_VL]], align 1 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: 
newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg8e8ff_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, const int8_t *base, size_t *new_vl, size_t vl) { @@ -818,8 +1028,13 @@ // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 1 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL]], align 1 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg2e8ff_v_i8m1( @@ -829,8 +1044,13 @@ // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 1 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg2e8ff_v_i8m1 (vint8m1_t *v0, vint8m1_t *v1, const int8_t *base, size_t *new_vl, size_t vl) { @@ -846,8 +1066,13 @@ // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 // CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 1 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP4]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL]], align 1 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg3e8ff_v_i8m1( @@ -859,8 +1084,13 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP4]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL]], align 1 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: 
newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg3e8ff_v_i8m1 (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, const int8_t *base, size_t *new_vl, size_t vl) { @@ -878,8 +1108,13 @@ // CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 // CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 1 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP5]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL]], align 1 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg4e8ff_v_i8m1( @@ -893,8 +1128,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP5]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL]], align 1 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg4e8ff_v_i8m1 (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, const int8_t *base, size_t *new_vl, size_t vl) { @@ -914,8 +1154,13 @@ // CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 4 // CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 1 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP6]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL]], align 1 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg5e8ff_v_i8m1( @@ -931,8 +1176,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 -// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP6]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL]], align 1 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // 
CHECK-RV64-NEXT: ret void // void test_vlseg5e8ff_v_i8m1 (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, const int8_t *base, size_t *new_vl, size_t vl) { @@ -954,8 +1204,13 @@ // CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 5 // CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 -// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 1 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP7]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL]], align 1 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg6e8ff_v_i8m1( @@ -973,8 +1228,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 -// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP7]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL]], align 1 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg6e8ff_v_i8m1 (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, const int8_t *base, size_t *new_vl, size_t vl) { @@ -998,8 +1258,13 @@ // CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 6 // CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 -// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 1 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP8]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 +// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL]], align 1 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg7e8ff_v_i8m1( @@ -1019,8 +1284,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 -// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP8]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP9]], 
i64* [[NEW_VL]], align 1 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg7e8ff_v_i8m1 (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, const int8_t *base, size_t *new_vl, size_t vl) { @@ -1046,8 +1316,13 @@ // CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 7 // CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 -// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 1 +// CHECK-RV32-NEXT: [[TMP9:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP9]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 +// CHECK-RV32-NEXT: store i32 [[TMP10]], i32* [[NEW_VL]], align 1 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg8e8ff_v_i8m1( @@ -1069,8 +1344,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 -// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP9]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP10]], i64* [[NEW_VL]], align 1 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg8e8ff_v_i8m1 (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, const int8_t *base, size_t *new_vl, size_t vl) { @@ -1084,8 +1364,13 @@ // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 1 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL]], align 1 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg2e8ff_v_i8m2( @@ -1095,8 +1380,13 @@ // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: 
newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 1 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg2e8ff_v_i8m2 (vint8m2_t *v0, vint8m2_t *v1, const int8_t *base, size_t *new_vl, size_t vl) { @@ -1112,8 +1402,13 @@ // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 // CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 1 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP4]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL]], align 1 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg3e8ff_v_i8m2( @@ -1125,8 +1420,13 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP4]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL]], align 1 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg3e8ff_v_i8m2 (vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, const int8_t *base, size_t *new_vl, size_t vl) { @@ -1144,8 +1444,13 @@ // CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 // CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 1 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP5]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL]], align 1 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg4e8ff_v_i8m2( @@ -1159,8 +1464,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP5]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , 
, , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL]], align 1 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg4e8ff_v_i8m2 (vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, const int8_t *base, size_t *new_vl, size_t vl) { @@ -1174,8 +1484,13 @@ // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 1 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL]], align 1 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg2e8ff_v_i8m4( @@ -1185,8 +1500,13 @@ // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 1 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg2e8ff_v_i8m4 (vint8m4_t *v0, vint8m4_t *v1, const int8_t *base, size_t *new_vl, size_t vl) { @@ -1200,8 +1520,13 @@ // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 2 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL]], align 2 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg2e16ff_v_i16mf4( @@ -1211,8 +1536,13 @@ // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], 
align 2 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg2e16ff_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, const int16_t *base, size_t *new_vl, size_t vl) { @@ -1228,8 +1558,13 @@ // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 // CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 2 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP4]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL]], align 2 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg3e16ff_v_i16mf4( @@ -1241,8 +1576,13 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP4]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg3e16ff_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, const int16_t *base, size_t *new_vl, size_t vl) { @@ -1260,8 +1600,13 @@ // CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 // CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 2 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP5]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL]], align 2 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg4e16ff_v_i16mf4( @@ -1275,8 +1620,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP5]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: br label 
[[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg4e16ff_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, const int16_t *base, size_t *new_vl, size_t vl) { @@ -1296,8 +1646,13 @@ // CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 4 // CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 2 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP6]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL]], align 2 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg5e16ff_v_i16mf4( @@ -1313,8 +1668,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 -// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP6]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg5e16ff_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, const int16_t *base, size_t *new_vl, size_t vl) { @@ -1336,8 +1696,13 @@ // CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 5 // CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 -// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 2 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP7]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL]], align 2 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg6e16ff_v_i16mf4( @@ -1355,8 +1720,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 -// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP7]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// 
CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg6e16ff_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, const int16_t *base, size_t *new_vl, size_t vl) { @@ -1380,8 +1750,13 @@ // CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 6 // CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 -// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 2 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP8]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 +// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL]], align 2 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg7e16ff_v_i16mf4( @@ -1401,8 +1776,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 -// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP8]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg7e16ff_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, const int16_t *base, size_t *new_vl, size_t vl) { @@ -1428,8 +1808,13 @@ // CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 7 // CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 -// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 2 +// CHECK-RV32-NEXT: [[TMP9:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP9]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 +// CHECK-RV32-NEXT: store i32 [[TMP10]], i32* [[NEW_VL]], align 2 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg8e16ff_v_i16mf4( @@ -1451,8 +1836,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 -// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// 
CHECK-RV64-NEXT: br i1 [[TMP9]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP10]], i64* [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg8e16ff_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, const int16_t *base, size_t *new_vl, size_t vl) { @@ -1466,8 +1856,13 @@ // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 2 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL]], align 2 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg2e16ff_v_i16mf2( @@ -1477,8 +1872,13 @@ // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg2e16ff_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, const int16_t *base, size_t *new_vl, size_t vl) { @@ -1494,8 +1894,13 @@ // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 // CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 2 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP4]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL]], align 2 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg3e16ff_v_i16mf2( @@ -1507,8 +1912,13 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = icmp 
ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP4]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg3e16ff_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, const int16_t *base, size_t *new_vl, size_t vl) { @@ -1526,8 +1936,13 @@ // CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 // CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 2 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP5]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL]], align 2 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg4e16ff_v_i16mf2( @@ -1541,8 +1956,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP5]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg4e16ff_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, const int16_t *base, size_t *new_vl, size_t vl) { @@ -1562,8 +1982,13 @@ // CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 4 // CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 2 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP6]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL]], align 2 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg5e16ff_v_i16mf2( @@ -1579,8 +2004,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 -// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: 
[[TMP6:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP6]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg5e16ff_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, const int16_t *base, size_t *new_vl, size_t vl) { @@ -1602,8 +2032,13 @@ // CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 5 // CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 -// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 2 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP7]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL]], align 2 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg6e16ff_v_i16mf2( @@ -1621,8 +2056,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 -// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP7]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg6e16ff_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, const int16_t *base, size_t *new_vl, size_t vl) { @@ -1646,8 +2086,13 @@ // CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 6 // CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 -// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 2 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP8]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 +// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL]], align 2 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg7e16ff_v_i16mf2( @@ -1667,8 +2112,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = 
extractvalue { , , , , , , , i64 } [[TMP0]], 7 -// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP8]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg7e16ff_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, const int16_t *base, size_t *new_vl, size_t vl) { @@ -1694,8 +2144,13 @@ // CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 7 // CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 -// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 2 +// CHECK-RV32-NEXT: [[TMP9:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP9]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 +// CHECK-RV32-NEXT: store i32 [[TMP10]], i32* [[NEW_VL]], align 2 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg8e16ff_v_i16mf2( @@ -1717,8 +2172,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 -// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP9]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP10]], i64* [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg8e16ff_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, const int16_t *base, size_t *new_vl, size_t vl) { @@ -1732,8 +2192,13 @@ // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 2 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL]], align 2 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg2e16ff_v_i16m1( @@ -1743,8 +2208,13 @@ // CHECK-RV64-NEXT: store 
[[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg2e16ff_v_i16m1 (vint16m1_t *v0, vint16m1_t *v1, const int16_t *base, size_t *new_vl, size_t vl) { @@ -1760,8 +2230,13 @@ // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 // CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 2 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP4]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL]], align 2 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg3e16ff_v_i16m1( @@ -1773,8 +2248,13 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP4]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg3e16ff_v_i16m1 (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, const int16_t *base, size_t *new_vl, size_t vl) { @@ -1792,8 +2272,13 @@ // CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 // CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 2 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP5]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL]], align 2 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg4e16ff_v_i16m1( @@ -1807,8 +2292,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: 
[[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP5]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg4e16ff_v_i16m1 (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, const int16_t *base, size_t *new_vl, size_t vl) { @@ -1828,8 +2318,13 @@ // CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 4 // CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 2 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP6]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL]], align 2 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg5e16ff_v_i16m1( @@ -1845,8 +2340,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 -// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP6]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg5e16ff_v_i16m1 (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, const int16_t *base, size_t *new_vl, size_t vl) { @@ -1868,8 +2368,13 @@ // CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 5 // CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 -// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 2 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP7]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL]], align 2 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg6e16ff_v_i16m1( @@ -1887,8 +2392,13 @@ // CHECK-RV64-NEXT: store 
[[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 -// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP7]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg6e16ff_v_i16m1 (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, const int16_t *base, size_t *new_vl, size_t vl) { @@ -1912,8 +2422,13 @@ // CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 6 // CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 -// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 2 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP8]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 +// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL]], align 2 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg7e16ff_v_i16m1( @@ -1933,8 +2448,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 -// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP8]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg7e16ff_v_i16m1 (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, const int16_t *base, size_t *new_vl, size_t vl) { @@ -1960,8 +2480,13 @@ // CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 7 // CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 -// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 2 +// CHECK-RV32-NEXT: [[TMP9:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP9]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 +// CHECK-RV32-NEXT: store i32 [[TMP10]], i32* [[NEW_VL]], align 2 +// CHECK-RV32-NEXT: br label 
[[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg8e16ff_v_i16m1( @@ -1983,8 +2508,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 -// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP9]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP10]], i64* [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg8e16ff_v_i16m1 (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, const int16_t *base, size_t *new_vl, size_t vl) { @@ -1998,8 +2528,13 @@ // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 2 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL]], align 2 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg2e16ff_v_i16m2( @@ -2009,8 +2544,13 @@ // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg2e16ff_v_i16m2 (vint16m2_t *v0, vint16m2_t *v1, const int16_t *base, size_t *new_vl, size_t vl) { @@ -2026,8 +2566,13 @@ // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 // CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 2 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP4]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* 
[[NEW_VL]], align 2 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg3e16ff_v_i16m2( @@ -2039,8 +2584,13 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP4]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg3e16ff_v_i16m2 (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, const int16_t *base, size_t *new_vl, size_t vl) { @@ -2058,8 +2608,13 @@ // CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 // CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 2 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP5]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL]], align 2 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg4e16ff_v_i16m2( @@ -2073,8 +2628,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP5]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg4e16ff_v_i16m2 (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, const int16_t *base, size_t *new_vl, size_t vl) { @@ -2088,8 +2648,13 @@ // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 2 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL]], align 2 
+// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg2e16ff_v_i16m4( @@ -2099,8 +2664,13 @@ // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg2e16ff_v_i16m4 (vint16m4_t *v0, vint16m4_t *v1, const int16_t *base, size_t *new_vl, size_t vl) { @@ -2114,8 +2684,13 @@ // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL]], align 4 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg2e32ff_v_i32mf2( @@ -2125,8 +2700,13 @@ // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 4 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg2e32ff_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, const int32_t *base, size_t *new_vl, size_t vl) { @@ -2142,8 +2722,13 @@ // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 // CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP4]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL]], align 4 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: 
ret void // // CHECK-RV64-LABEL: @test_vlseg3e32ff_v_i32mf2( @@ -2155,8 +2740,13 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP4]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL]], align 4 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg3e32ff_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, const int32_t *base, size_t *new_vl, size_t vl) { @@ -2174,8 +2764,13 @@ // CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 // CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP5]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL]], align 4 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg4e32ff_v_i32mf2( @@ -2189,8 +2784,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP5]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL]], align 4 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg4e32ff_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, const int32_t *base, size_t *new_vl, size_t vl) { @@ -2210,8 +2810,13 @@ // CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 4 // CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP6]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL]], align 4 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // 
CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg5e32ff_v_i32mf2( @@ -2227,8 +2832,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 -// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP6]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL]], align 4 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg5e32ff_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, const int32_t *base, size_t *new_vl, size_t vl) { @@ -2250,8 +2860,13 @@ // CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 5 // CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 -// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP7]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL]], align 4 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg6e32ff_v_i32mf2( @@ -2269,8 +2884,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 -// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP7]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL]], align 4 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg6e32ff_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, const int32_t *base, size_t *new_vl, size_t vl) { @@ -2294,8 +2914,13 @@ // CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 6 // CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 -// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP8]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 +// 
CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL]], align 4 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg7e32ff_v_i32mf2( @@ -2315,8 +2940,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 -// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP8]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL]], align 4 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg7e32ff_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, const int32_t *base, size_t *new_vl, size_t vl) { @@ -2342,8 +2972,13 @@ // CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 7 // CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 -// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP9:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP9]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 +// CHECK-RV32-NEXT: store i32 [[TMP10]], i32* [[NEW_VL]], align 4 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg8e32ff_v_i32mf2( @@ -2365,8 +3000,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 -// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP9]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP10]], i64* [[NEW_VL]], align 4 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg8e32ff_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, const int32_t *base, size_t *new_vl, size_t vl) { @@ -2380,8 +3020,13 @@ // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = icmp ne i32* 
[[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL]], align 4 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg2e32ff_v_i32m1( @@ -2391,8 +3036,13 @@ // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 4 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg2e32ff_v_i32m1 (vint32m1_t *v0, vint32m1_t *v1, const int32_t *base, size_t *new_vl, size_t vl) { @@ -2408,8 +3058,13 @@ // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 // CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP4]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL]], align 4 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg3e32ff_v_i32m1( @@ -2421,8 +3076,13 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP4]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL]], align 4 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg3e32ff_v_i32m1 (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, const int32_t *base, size_t *new_vl, size_t vl) { @@ -2440,8 +3100,13 @@ // CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 // CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP5]], label 
[[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL]], align 4 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg4e32ff_v_i32m1( @@ -2455,8 +3120,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP5]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL]], align 4 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg4e32ff_v_i32m1 (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, const int32_t *base, size_t *new_vl, size_t vl) { @@ -2476,8 +3146,13 @@ // CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 4 // CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP6]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL]], align 4 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg5e32ff_v_i32m1( @@ -2493,8 +3168,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 -// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP6]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL]], align 4 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg5e32ff_v_i32m1 (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, const int32_t *base, size_t *new_vl, size_t vl) { @@ -2516,8 +3196,13 @@ // CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 5 // CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 -// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = icmp ne i32* [[NEW_VL:%.*]], 
null +// CHECK-RV32-NEXT: br i1 [[TMP7]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL]], align 4 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg6e32ff_v_i32m1( @@ -2535,8 +3220,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 -// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP7]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL]], align 4 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg6e32ff_v_i32m1 (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, const int32_t *base, size_t *new_vl, size_t vl) { @@ -2560,8 +3250,13 @@ // CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 6 // CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 -// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP8]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 +// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL]], align 4 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg7e32ff_v_i32m1( @@ -2581,8 +3276,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 -// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP8]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL]], align 4 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg7e32ff_v_i32m1 (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, const int32_t *base, size_t *new_vl, size_t vl) { @@ -2608,8 +3308,13 @@ // CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 7 // CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , 
, , , i32 } [[TMP0]], 8 -// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP9:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP9]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 +// CHECK-RV32-NEXT: store i32 [[TMP10]], i32* [[NEW_VL]], align 4 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg8e32ff_v_i32m1( @@ -2631,8 +3336,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 -// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP9]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP10]], i64* [[NEW_VL]], align 4 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg8e32ff_v_i32m1 (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, const int32_t *base, size_t *new_vl, size_t vl) { @@ -2646,8 +3356,13 @@ // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL]], align 4 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg2e32ff_v_i32m2( @@ -2657,8 +3372,13 @@ // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 4 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg2e32ff_v_i32m2 (vint32m2_t *v0, vint32m2_t *v1, const int32_t *base, size_t *new_vl, size_t vl) { @@ -2674,8 +3394,13 @@ // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 // CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP4]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL]], align 4 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg3e32ff_v_i32m2( @@ -2687,8 +3412,13 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP4]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL]], align 4 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg3e32ff_v_i32m2 (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, const int32_t *base, size_t *new_vl, size_t vl) { @@ -2706,8 +3436,13 @@ // CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 // CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP5]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL]], align 4 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg4e32ff_v_i32m2( @@ -2721,8 +3456,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP5]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL]], align 4 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg4e32ff_v_i32m2 (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, const int32_t *base, size_t *new_vl, size_t vl) { @@ -2736,8 +3476,13 @@ // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// 
CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL]], align 4 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg2e32ff_v_i32m4( @@ -2747,8 +3492,13 @@ // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 4 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg2e32ff_v_i32m4 (vint32m4_t *v0, vint32m4_t *v1, const int32_t *base, size_t *new_vl, size_t vl) { @@ -2762,8 +3512,13 @@ // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 8 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL]], align 8 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg2e64ff_v_i64m1( @@ -2773,8 +3528,13 @@ // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg2e64ff_v_i64m1 (vint64m1_t *v0, vint64m1_t *v1, const int64_t *base, size_t *new_vl, size_t vl) { @@ -2790,8 +3550,13 @@ // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 // CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store 
i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 8 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP4]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL]], align 8 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg3e64ff_v_i64m1( @@ -2803,8 +3568,13 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP4]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg3e64ff_v_i64m1 (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, const int64_t *base, size_t *new_vl, size_t vl) { @@ -2822,8 +3592,13 @@ // CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 // CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 8 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP5]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL]], align 8 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg4e64ff_v_i64m1( @@ -2837,8 +3612,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP5]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg4e64ff_v_i64m1 (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, const int64_t *base, size_t *new_vl, size_t vl) { @@ -2858,8 +3638,13 @@ // CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 4 // CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store i32 
[[TMP6]], i32* [[NEW_VL:%.*]], align 8 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP6]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL]], align 8 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg5e64ff_v_i64m1( @@ -2875,8 +3660,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 -// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP6]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg5e64ff_v_i64m1 (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, const int64_t *base, size_t *new_vl, size_t vl) { @@ -2898,8 +3688,13 @@ // CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 // CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 5 // CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 -// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 8 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP7]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL]], align 8 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg6e64ff_v_i64m1( @@ -2917,8 +3712,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 -// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP7]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg6e64ff_v_i64m1 (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, const int64_t *base, size_t *new_vl, size_t vl) { @@ -2942,8 +3742,13 @@ // CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 // CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 6 // CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 -// 
CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 -// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 8 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP8]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 +// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL]], align 8 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg7e64ff_v_i64m1( @@ -2963,8 +3768,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 -// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP8]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg7e64ff_v_i64m1 (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, const int64_t *base, size_t *new_vl, size_t vl) { @@ -2990,8 +3800,13 @@ // CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 // CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 7 // CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 -// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 8 +// CHECK-RV32-NEXT: [[TMP9:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP9]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 +// CHECK-RV32-NEXT: store i32 [[TMP10]], i32* [[NEW_VL]], align 8 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg8e64ff_v_i64m1( @@ -3013,8 +3828,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 -// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP9]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP10]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg8e64ff_v_i64m1 (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, const int64_t *base, size_t *new_vl, size_t vl) { @@ -3028,8 
+3848,13 @@ // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 8 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL]], align 8 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg2e64ff_v_i64m2( @@ -3039,8 +3864,13 @@ // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg2e64ff_v_i64m2 (vint64m2_t *v0, vint64m2_t *v1, const int64_t *base, size_t *new_vl, size_t vl) { @@ -3056,8 +3886,13 @@ // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 // CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 8 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP4]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL]], align 8 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg3e64ff_v_i64m2( @@ -3069,8 +3904,13 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP4]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg3e64ff_v_i64m2 (vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, const int64_t *base, size_t *new_vl, size_t vl) { @@ -3088,8 +3928,13 @@ // CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // 
CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 // CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 8 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP5]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL]], align 8 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg4e64ff_v_i64m2( @@ -3103,8 +3948,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP5]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg4e64ff_v_i64m2 (vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, const int64_t *base, size_t *new_vl, size_t vl) { @@ -3118,8 +3968,13 @@ // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 8 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL]], align 8 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg2e64ff_v_i64m4( @@ -3129,8 +3984,13 @@ // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg2e64ff_v_i64m4 (vint64m4_t *v0, vint64m4_t *v1, const int64_t *base, size_t *new_vl, size_t vl) { @@ -3144,8 +4004,13 @@ // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue 
{ , , i32 } [[TMP0]], 1 // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 1 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL]], align 1 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg2e8ff_v_u8mf8( @@ -3155,8 +4020,13 @@ // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 1 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg2e8ff_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, const uint8_t *base, size_t *new_vl, size_t vl) { @@ -3172,8 +4042,13 @@ // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 // CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 1 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP4]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL]], align 1 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg3e8ff_v_u8mf8( @@ -3185,8 +4060,13 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP4]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL]], align 1 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg3e8ff_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, const uint8_t *base, size_t *new_vl, size_t vl) { @@ -3204,8 +4084,13 @@ // CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 // CHECK-RV32-NEXT: store [[TMP4]], * 
[[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 1 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP5]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL]], align 1 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg4e8ff_v_u8mf8( @@ -3219,8 +4104,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP5]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL]], align 1 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg4e8ff_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, const uint8_t *base, size_t *new_vl, size_t vl) { @@ -3240,8 +4130,13 @@ // CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 4 // CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 1 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP6]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL]], align 1 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg5e8ff_v_u8mf8( @@ -3257,8 +4152,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 -// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP6]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL]], align 1 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg5e8ff_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, const uint8_t *base, size_t *new_vl, size_t vl) { @@ -3280,8 +4180,13 @@ // CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i32 } 
[[TMP0]], 5 // CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 -// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 1 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP7]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL]], align 1 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg6e8ff_v_u8mf8( @@ -3299,8 +4204,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 -// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP7]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL]], align 1 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg6e8ff_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, const uint8_t *base, size_t *new_vl, size_t vl) { @@ -3324,8 +4234,13 @@ // CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 6 // CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 -// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 1 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP8]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 +// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL]], align 1 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg7e8ff_v_u8mf8( @@ -3345,8 +4260,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 -// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP8]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL]], align 1 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg7e8ff_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, const uint8_t *base, size_t *new_vl, 
size_t vl) { @@ -3372,8 +4292,13 @@ // CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 7 // CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 -// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 1 +// CHECK-RV32-NEXT: [[TMP9:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP9]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 +// CHECK-RV32-NEXT: store i32 [[TMP10]], i32* [[NEW_VL]], align 1 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg8e8ff_v_u8mf8( @@ -3395,8 +4320,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 -// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP9]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP10]], i64* [[NEW_VL]], align 1 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg8e8ff_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, const uint8_t *base, size_t *new_vl, size_t vl) { @@ -3410,8 +4340,13 @@ // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 1 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL]], align 1 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg2e8ff_v_u8mf4( @@ -3421,8 +4356,13 @@ // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 1 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void 
test_vlseg2e8ff_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, const uint8_t *base, size_t *new_vl, size_t vl) { @@ -3438,8 +4378,13 @@ // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 // CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 1 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP4]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL]], align 1 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg3e8ff_v_u8mf4( @@ -3451,8 +4396,13 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP4]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL]], align 1 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg3e8ff_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, const uint8_t *base, size_t *new_vl, size_t vl) { @@ -3470,8 +4420,13 @@ // CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 // CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 1 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP5]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL]], align 1 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg4e8ff_v_u8mf4( @@ -3485,8 +4440,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP5]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL]], align 1 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg4e8ff_v_u8mf4 (vuint8mf4_t *v0, 
vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, const uint8_t *base, size_t *new_vl, size_t vl) { @@ -3506,8 +4466,13 @@ // CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 4 // CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 1 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP6]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL]], align 1 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg5e8ff_v_u8mf4( @@ -3523,8 +4488,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 -// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP6]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL]], align 1 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg5e8ff_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, const uint8_t *base, size_t *new_vl, size_t vl) { @@ -3546,8 +4516,13 @@ // CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 5 // CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 -// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 1 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP7]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL]], align 1 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg6e8ff_v_u8mf4( @@ -3565,8 +4540,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 -// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP7]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL]], align 1 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // 
CHECK-RV64-NEXT: ret void // void test_vlseg6e8ff_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, const uint8_t *base, size_t *new_vl, size_t vl) { @@ -3590,8 +4570,13 @@ // CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 6 // CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 -// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 1 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP8]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 +// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL]], align 1 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg7e8ff_v_u8mf4( @@ -3611,8 +4596,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 -// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP8]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL]], align 1 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg7e8ff_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, const uint8_t *base, size_t *new_vl, size_t vl) { @@ -3638,8 +4628,13 @@ // CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 7 // CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 -// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 1 +// CHECK-RV32-NEXT: [[TMP9:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP9]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 +// CHECK-RV32-NEXT: store i32 [[TMP10]], i32* [[NEW_VL]], align 1 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg8e8ff_v_u8mf4( @@ -3661,8 +4656,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 -// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP9]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP10:%.*]] = 
extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP10]], i64* [[NEW_VL]], align 1 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg8e8ff_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, const uint8_t *base, size_t *new_vl, size_t vl) { @@ -3676,8 +4676,13 @@ // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 1 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL]], align 1 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg2e8ff_v_u8mf2( @@ -3687,8 +4692,13 @@ // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 1 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg2e8ff_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, const uint8_t *base, size_t *new_vl, size_t vl) { @@ -3704,8 +4714,13 @@ // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 // CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 1 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP4]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL]], align 1 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg3e8ff_v_u8mf2( @@ -3717,8 +4732,13 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP4]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// 
CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL]], align 1 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg3e8ff_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, const uint8_t *base, size_t *new_vl, size_t vl) { @@ -3736,8 +4756,13 @@ // CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 // CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 1 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP5]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL]], align 1 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg4e8ff_v_u8mf2( @@ -3751,8 +4776,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP5]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL]], align 1 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg4e8ff_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, const uint8_t *base, size_t *new_vl, size_t vl) { @@ -3772,8 +4802,13 @@ // CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 4 // CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 1 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP6]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL]], align 1 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg5e8ff_v_u8mf2( @@ -3789,8 +4824,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 -// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP6]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: 
newvl_store: +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL]], align 1 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg5e8ff_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, const uint8_t *base, size_t *new_vl, size_t vl) { @@ -3812,8 +4852,13 @@ // CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 5 // CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 -// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 1 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP7]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL]], align 1 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg6e8ff_v_u8mf2( @@ -3831,8 +4876,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 -// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP7]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL]], align 1 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg6e8ff_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, const uint8_t *base, size_t *new_vl, size_t vl) { @@ -3856,8 +4906,13 @@ // CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 6 // CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 -// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 1 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP8]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 +// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL]], align 1 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg7e8ff_v_u8mf2( @@ -3877,8 +4932,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 -// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = icmp ne i64* 
[[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP8]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL]], align 1 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg7e8ff_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, const uint8_t *base, size_t *new_vl, size_t vl) { @@ -3904,8 +4964,13 @@ // CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 7 // CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 -// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 1 +// CHECK-RV32-NEXT: [[TMP9:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP9]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 +// CHECK-RV32-NEXT: store i32 [[TMP10]], i32* [[NEW_VL]], align 1 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg8e8ff_v_u8mf2( @@ -3927,8 +4992,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 -// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP9]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP10]], i64* [[NEW_VL]], align 1 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg8e8ff_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, const uint8_t *base, size_t *new_vl, size_t vl) { @@ -3942,8 +5012,13 @@ // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 1 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL]], align 1 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg2e8ff_v_u8m1( @@ -3953,8 +5028,13 @@ // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: 
[[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 1 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg2e8ff_v_u8m1 (vuint8m1_t *v0, vuint8m1_t *v1, const uint8_t *base, size_t *new_vl, size_t vl) { @@ -3970,8 +5050,13 @@ // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 // CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 1 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP4]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL]], align 1 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg3e8ff_v_u8m1( @@ -3983,8 +5068,13 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP4]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL]], align 1 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg3e8ff_v_u8m1 (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, const uint8_t *base, size_t *new_vl, size_t vl) { @@ -4002,8 +5092,13 @@ // CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 // CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 1 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP5]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL]], align 1 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg4e8ff_v_u8m1( @@ -4017,8 +5112,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 -// 
CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP5]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL]], align 1 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg4e8ff_v_u8m1 (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, const uint8_t *base, size_t *new_vl, size_t vl) { @@ -4038,8 +5138,13 @@ // CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 4 // CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 1 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP6]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL]], align 1 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg5e8ff_v_u8m1( @@ -4055,8 +5160,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 -// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP6]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL]], align 1 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg5e8ff_v_u8m1 (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, const uint8_t *base, size_t *new_vl, size_t vl) { @@ -4078,8 +5188,13 @@ // CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 5 // CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 -// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 1 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP7]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL]], align 1 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg6e8ff_v_u8m1( @@ -4097,8 +5212,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] 
= extractvalue { , , , , , , i64 } [[TMP0]], 6 -// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP7]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL]], align 1 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg6e8ff_v_u8m1 (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, const uint8_t *base, size_t *new_vl, size_t vl) { @@ -4122,8 +5242,13 @@ // CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 6 // CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 -// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 1 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP8]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 +// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL]], align 1 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg7e8ff_v_u8m1( @@ -4143,8 +5268,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 -// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP8]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL]], align 1 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg7e8ff_v_u8m1 (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, const uint8_t *base, size_t *new_vl, size_t vl) { @@ -4170,8 +5300,13 @@ // CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 7 // CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 -// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 1 +// CHECK-RV32-NEXT: [[TMP9:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP9]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 +// CHECK-RV32-NEXT: store i32 [[TMP10]], i32* [[NEW_VL]], align 1 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg8e8ff_v_u8m1( @@ -4193,8 +5328,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // 
CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 -// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP9]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP10]], i64* [[NEW_VL]], align 1 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg8e8ff_v_u8m1 (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, const uint8_t *base, size_t *new_vl, size_t vl) { @@ -4208,8 +5348,13 @@ // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 1 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL]], align 1 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg2e8ff_v_u8m2( @@ -4219,8 +5364,13 @@ // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 1 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg2e8ff_v_u8m2 (vuint8m2_t *v0, vuint8m2_t *v1, const uint8_t *base, size_t *new_vl, size_t vl) { @@ -4236,8 +5386,13 @@ // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 // CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 1 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP4]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL]], align 1 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg3e8ff_v_u8m2( @@ -4249,8 +5404,13 @@ // CHECK-RV64-NEXT: store 
[[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP4]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL]], align 1 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg3e8ff_v_u8m2 (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, const uint8_t *base, size_t *new_vl, size_t vl) { @@ -4268,8 +5428,13 @@ // CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 // CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 1 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP5]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL]], align 1 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg4e8ff_v_u8m2( @@ -4283,8 +5448,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP5]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL]], align 1 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg4e8ff_v_u8m2 (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, const uint8_t *base, size_t *new_vl, size_t vl) { @@ -4298,8 +5468,13 @@ // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 1 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL]], align 1 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg2e8ff_v_u8m4( @@ -4309,8 +5484,13 @@ // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 
1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 1 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg2e8ff_v_u8m4 (vuint8m4_t *v0, vuint8m4_t *v1, const uint8_t *base, size_t *new_vl, size_t vl) { @@ -4324,8 +5504,13 @@ // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 2 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL]], align 2 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg2e16ff_v_u16mf4( @@ -4335,8 +5520,13 @@ // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg2e16ff_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, const uint16_t *base, size_t *new_vl, size_t vl) { @@ -4352,8 +5542,13 @@ // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 // CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 2 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP4]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL]], align 2 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg3e16ff_v_u16mf4( @@ -4365,8 +5560,13 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 // 
CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP4]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg3e16ff_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, const uint16_t *base, size_t *new_vl, size_t vl) { @@ -4384,8 +5584,13 @@ // CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 // CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 2 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP5]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL]], align 2 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg4e16ff_v_u16mf4( @@ -4399,8 +5604,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP5]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg4e16ff_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, const uint16_t *base, size_t *new_vl, size_t vl) { @@ -4420,8 +5630,13 @@ // CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 4 // CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 2 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP6]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL]], align 2 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg5e16ff_v_u16mf4( @@ -4437,8 +5652,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , 
i64 } [[TMP0]], 4 // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 -// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP6]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg5e16ff_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, const uint16_t *base, size_t *new_vl, size_t vl) { @@ -4460,8 +5680,13 @@ // CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 5 // CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 -// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 2 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP7]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL]], align 2 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg6e16ff_v_u16mf4( @@ -4479,8 +5704,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 -// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP7]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg6e16ff_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, const uint16_t *base, size_t *new_vl, size_t vl) { @@ -4504,8 +5734,13 @@ // CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 6 // CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 -// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 2 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP8]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 +// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL]], align 2 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg7e16ff_v_u16mf4( @@ 
-4525,8 +5760,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 -// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP8]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg7e16ff_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, const uint16_t *base, size_t *new_vl, size_t vl) { @@ -4552,8 +5792,13 @@ // CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 7 // CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 -// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 2 +// CHECK-RV32-NEXT: [[TMP9:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP9]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 +// CHECK-RV32-NEXT: store i32 [[TMP10]], i32* [[NEW_VL]], align 2 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg8e16ff_v_u16mf4( @@ -4575,8 +5820,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 -// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP9]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP10]], i64* [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg8e16ff_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, const uint16_t *base, size_t *new_vl, size_t vl) { @@ -4590,8 +5840,13 @@ // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 2 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 +// 
CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL]], align 2 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg2e16ff_v_u16mf2( @@ -4601,8 +5856,13 @@ // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg2e16ff_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, const uint16_t *base, size_t *new_vl, size_t vl) { @@ -4618,8 +5878,13 @@ // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 // CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 2 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP4]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL]], align 2 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg3e16ff_v_u16mf2( @@ -4631,8 +5896,13 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP4]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg3e16ff_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, const uint16_t *base, size_t *new_vl, size_t vl) { @@ -4650,8 +5920,13 @@ // CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 // CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 2 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP5]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* 
[[NEW_VL]], align 2 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg4e16ff_v_u16mf2( @@ -4665,8 +5940,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP5]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg4e16ff_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, const uint16_t *base, size_t *new_vl, size_t vl) { @@ -4686,8 +5966,13 @@ // CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 4 // CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 2 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP6]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL]], align 2 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg5e16ff_v_u16mf2( @@ -4703,8 +5988,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 -// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP6]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg5e16ff_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, const uint16_t *base, size_t *new_vl, size_t vl) { @@ -4726,8 +6016,13 @@ // CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 5 // CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 -// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 2 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP7]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { 
, , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL]], align 2 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg6e16ff_v_u16mf2( @@ -4745,8 +6040,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 -// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP7]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg6e16ff_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, const uint16_t *base, size_t *new_vl, size_t vl) { @@ -4770,8 +6070,13 @@ // CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 6 // CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 -// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 2 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP8]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 +// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL]], align 2 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg7e16ff_v_u16mf2( @@ -4791,8 +6096,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 -// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP8]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg7e16ff_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, const uint16_t *base, size_t *new_vl, size_t vl) { @@ -4818,8 +6128,13 @@ // CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 7 // CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 -// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 2 +// CHECK-RV32-NEXT: [[TMP9:%.*]] = icmp ne 
i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP9]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 +// CHECK-RV32-NEXT: store i32 [[TMP10]], i32* [[NEW_VL]], align 2 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg8e16ff_v_u16mf2( @@ -4841,8 +6156,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 -// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP9]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP10]], i64* [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg8e16ff_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, const uint16_t *base, size_t *new_vl, size_t vl) { @@ -4856,8 +6176,13 @@ // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 2 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL]], align 2 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg2e16ff_v_u16m1( @@ -4867,8 +6192,13 @@ // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg2e16ff_v_u16m1 (vuint16m1_t *v0, vuint16m1_t *v1, const uint16_t *base, size_t *new_vl, size_t vl) { @@ -4884,8 +6214,13 @@ // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 // CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store i32 [[TMP4]], 
i32* [[NEW_VL:%.*]], align 2 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP4]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL]], align 2 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg3e16ff_v_u16m1( @@ -4897,8 +6232,13 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP4]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg3e16ff_v_u16m1 (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, const uint16_t *base, size_t *new_vl, size_t vl) { @@ -4916,8 +6256,13 @@ // CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 // CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 2 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP5]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL]], align 2 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg4e16ff_v_u16m1( @@ -4931,8 +6276,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP5]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg4e16ff_v_u16m1 (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, const uint16_t *base, size_t *new_vl, size_t vl) { @@ -4952,8 +6302,13 @@ // CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 4 // CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store i32 [[TMP6]], 
i32* [[NEW_VL:%.*]], align 2 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP6]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL]], align 2 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg5e16ff_v_u16m1( @@ -4969,8 +6324,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 -// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP6]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg5e16ff_v_u16m1 (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, const uint16_t *base, size_t *new_vl, size_t vl) { @@ -4992,8 +6352,13 @@ // CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 5 // CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 -// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 2 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP7]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL]], align 2 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg6e16ff_v_u16m1( @@ -5011,8 +6376,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 -// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP7]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg6e16ff_v_u16m1 (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, const uint16_t *base, size_t *new_vl, size_t vl) { @@ -5036,8 +6406,13 @@ // CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 6 // CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// 
CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 -// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 2 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP8]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 +// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL]], align 2 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg7e16ff_v_u16m1( @@ -5057,8 +6432,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 -// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP8]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg7e16ff_v_u16m1 (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, const uint16_t *base, size_t *new_vl, size_t vl) { @@ -5084,8 +6464,13 @@ // CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 7 // CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 -// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 2 +// CHECK-RV32-NEXT: [[TMP9:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP9]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 +// CHECK-RV32-NEXT: store i32 [[TMP10]], i32* [[NEW_VL]], align 2 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg8e16ff_v_u16m1( @@ -5107,8 +6492,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 -// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP9]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP10]], i64* [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg8e16ff_v_u16m1 (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, const uint16_t *base, size_t *new_vl, size_t 
vl) { @@ -5122,8 +6512,13 @@ // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 2 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL]], align 2 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg2e16ff_v_u16m2( @@ -5133,8 +6528,13 @@ // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg2e16ff_v_u16m2 (vuint16m2_t *v0, vuint16m2_t *v1, const uint16_t *base, size_t *new_vl, size_t vl) { @@ -5150,8 +6550,13 @@ // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 // CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 2 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP4]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL]], align 2 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg3e16ff_v_u16m2( @@ -5163,8 +6568,13 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP4]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg3e16ff_v_u16m2 (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, const uint16_t *base, size_t *new_vl, size_t vl) { @@ -5182,8 +6592,13 @@ // CHECK-RV32-NEXT: store [[TMP3]], 
* [[V2:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 // CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 2 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP5]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL]], align 2 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg4e16ff_v_u16m2( @@ -5197,8 +6612,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP5]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg4e16ff_v_u16m2 (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, const uint16_t *base, size_t *new_vl, size_t vl) { @@ -5212,8 +6632,13 @@ // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 2 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL]], align 2 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg2e16ff_v_u16m4( @@ -5223,8 +6648,13 @@ // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg2e16ff_v_u16m4 (vuint16m4_t *v0, vuint16m4_t *v1, const uint16_t *base, size_t *new_vl, size_t vl) { @@ -5238,8 +6668,13 @@ // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // 
CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL]], align 4 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg2e32ff_v_u32mf2( @@ -5249,8 +6684,13 @@ // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 4 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg2e32ff_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, const uint32_t *base, size_t *new_vl, size_t vl) { @@ -5266,8 +6706,13 @@ // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 // CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP4]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL]], align 4 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg3e32ff_v_u32mf2( @@ -5279,8 +6724,13 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP4]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL]], align 4 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg3e32ff_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, const uint32_t *base, size_t *new_vl, size_t vl) { @@ -5298,8 +6748,13 @@ // CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , 
i32 } [[TMP0]], 3 // CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP5]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL]], align 4 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg4e32ff_v_u32mf2( @@ -5313,8 +6768,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP5]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL]], align 4 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg4e32ff_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, const uint32_t *base, size_t *new_vl, size_t vl) { @@ -5334,8 +6794,13 @@ // CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 4 // CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP6]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL]], align 4 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg5e32ff_v_u32mf2( @@ -5351,8 +6816,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 -// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP6]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL]], align 4 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg5e32ff_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, const uint32_t *base, size_t *new_vl, size_t vl) { @@ -5374,8 +6844,13 @@ // CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], 
align 4 // CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 5 // CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 -// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP7]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL]], align 4 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg6e32ff_v_u32mf2( @@ -5393,8 +6868,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 -// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP7]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL]], align 4 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg6e32ff_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, const uint32_t *base, size_t *new_vl, size_t vl) { @@ -5418,8 +6898,13 @@ // CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 6 // CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 -// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP8]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 +// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL]], align 4 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg7e32ff_v_u32mf2( @@ -5439,8 +6924,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 -// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP8]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL]], align 4 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg7e32ff_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t 
*v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, const uint32_t *base, size_t *new_vl, size_t vl) { @@ -5466,8 +6956,13 @@ // CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 7 // CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 -// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP9:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP9]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 +// CHECK-RV32-NEXT: store i32 [[TMP10]], i32* [[NEW_VL]], align 4 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg8e32ff_v_u32mf2( @@ -5489,8 +6984,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 -// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP9]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP10]], i64* [[NEW_VL]], align 4 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg8e32ff_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, const uint32_t *base, size_t *new_vl, size_t vl) { @@ -5504,8 +7004,13 @@ // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL]], align 4 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg2e32ff_v_u32m1( @@ -5515,8 +7020,13 @@ // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 4 +// 
CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg2e32ff_v_u32m1 (vuint32m1_t *v0, vuint32m1_t *v1, const uint32_t *base, size_t *new_vl, size_t vl) { @@ -5532,8 +7042,13 @@ // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 // CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP4]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL]], align 4 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg3e32ff_v_u32m1( @@ -5545,8 +7060,13 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP4]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL]], align 4 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg3e32ff_v_u32m1 (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, const uint32_t *base, size_t *new_vl, size_t vl) { @@ -5564,8 +7084,13 @@ // CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 // CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP5]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL]], align 4 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg4e32ff_v_u32m1( @@ -5579,8 +7104,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP5]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL]], align 4 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] 
+// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg4e32ff_v_u32m1 (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, const uint32_t *base, size_t *new_vl, size_t vl) { @@ -5600,8 +7130,13 @@ // CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 4 // CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP6]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL]], align 4 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg5e32ff_v_u32m1( @@ -5617,8 +7152,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 -// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP6]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL]], align 4 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg5e32ff_v_u32m1 (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, const uint32_t *base, size_t *new_vl, size_t vl) { @@ -5640,8 +7180,13 @@ // CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 5 // CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 -// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP7]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL]], align 4 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg6e32ff_v_u32m1( @@ -5659,8 +7204,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 -// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP7]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 
[[TMP8]], i64* [[NEW_VL]], align 4 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg6e32ff_v_u32m1 (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, const uint32_t *base, size_t *new_vl, size_t vl) { @@ -5684,8 +7234,13 @@ // CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 6 // CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 -// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP8]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 +// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL]], align 4 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg7e32ff_v_u32m1( @@ -5705,8 +7260,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 -// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP8]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL]], align 4 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg7e32ff_v_u32m1 (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, const uint32_t *base, size_t *new_vl, size_t vl) { @@ -5732,8 +7292,13 @@ // CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 7 // CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 -// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP9:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP9]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 +// CHECK-RV32-NEXT: store i32 [[TMP10]], i32* [[NEW_VL]], align 4 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg8e32ff_v_u32m1( @@ -5755,8 +7320,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 -// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP9]], 
label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP10]], i64* [[NEW_VL]], align 4 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg8e32ff_v_u32m1 (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, const uint32_t *base, size_t *new_vl, size_t vl) { @@ -5770,8 +7340,13 @@ // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL]], align 4 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg2e32ff_v_u32m2( @@ -5781,8 +7356,13 @@ // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 4 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg2e32ff_v_u32m2 (vuint32m2_t *v0, vuint32m2_t *v1, const uint32_t *base, size_t *new_vl, size_t vl) { @@ -5798,8 +7378,13 @@ // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 // CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP4]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL]], align 4 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg3e32ff_v_u32m2( @@ -5811,8 +7396,13 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// 
CHECK-RV64-NEXT: br i1 [[TMP4]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL]], align 4 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg3e32ff_v_u32m2 (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, const uint32_t *base, size_t *new_vl, size_t vl) { @@ -5830,8 +7420,13 @@ // CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 // CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP5]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL]], align 4 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg4e32ff_v_u32m2( @@ -5845,8 +7440,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP5]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL]], align 4 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg4e32ff_v_u32m2 (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, const uint32_t *base, size_t *new_vl, size_t vl) { @@ -5860,8 +7460,13 @@ // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL]], align 4 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg2e32ff_v_u32m4( @@ -5871,8 +7476,13 @@ // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: 
br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 4 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg2e32ff_v_u32m4 (vuint32m4_t *v0, vuint32m4_t *v1, const uint32_t *base, size_t *new_vl, size_t vl) { @@ -5886,8 +7496,13 @@ // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 8 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL]], align 8 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg2e64ff_v_u64m1( @@ -5897,8 +7512,13 @@ // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg2e64ff_v_u64m1 (vuint64m1_t *v0, vuint64m1_t *v1, const uint64_t *base, size_t *new_vl, size_t vl) { @@ -5914,8 +7534,13 @@ // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 // CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 8 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP4]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL]], align 8 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg3e64ff_v_u64m1( @@ -5927,8 +7552,13 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP4]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: 
newvl_store: +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg3e64ff_v_u64m1 (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, const uint64_t *base, size_t *new_vl, size_t vl) { @@ -5946,8 +7576,13 @@ // CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 // CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 8 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP5]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL]], align 8 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg4e64ff_v_u64m1( @@ -5961,8 +7596,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP5]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg4e64ff_v_u64m1 (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, const uint64_t *base, size_t *new_vl, size_t vl) { @@ -5982,8 +7622,13 @@ // CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 4 // CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 8 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP6]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL]], align 8 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg5e64ff_v_u64m1( @@ -5999,8 +7644,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 -// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP6]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// 
CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg5e64ff_v_u64m1 (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, const uint64_t *base, size_t *new_vl, size_t vl) { @@ -6022,8 +7672,13 @@ // CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 // CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 5 // CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 -// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 8 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP7]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL]], align 8 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg6e64ff_v_u64m1( @@ -6041,8 +7696,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 -// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP7]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg6e64ff_v_u64m1 (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, const uint64_t *base, size_t *new_vl, size_t vl) { @@ -6066,8 +7726,13 @@ // CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 // CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 6 // CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 -// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 8 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP8]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 +// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL]], align 8 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg7e64ff_v_u64m1( @@ -6087,8 +7752,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 -// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] 
= icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP8]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg7e64ff_v_u64m1 (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, const uint64_t *base, size_t *new_vl, size_t vl) { @@ -6114,8 +7784,13 @@ // CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 // CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 7 // CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 -// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 8 +// CHECK-RV32-NEXT: [[TMP9:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP9]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 +// CHECK-RV32-NEXT: store i32 [[TMP10]], i32* [[NEW_VL]], align 8 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg8e64ff_v_u64m1( @@ -6137,8 +7812,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 -// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP9]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP10]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg8e64ff_v_u64m1 (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, const uint64_t *base, size_t *new_vl, size_t vl) { @@ -6152,8 +7832,13 @@ // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 8 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL]], align 8 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg2e64ff_v_u64m2( @@ -6163,8 +7848,13 @@ // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 
8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg2e64ff_v_u64m2 (vuint64m2_t *v0, vuint64m2_t *v1, const uint64_t *base, size_t *new_vl, size_t vl) { @@ -6180,8 +7870,13 @@ // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 // CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 8 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP4]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL]], align 8 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg3e64ff_v_u64m2( @@ -6193,8 +7888,13 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP4]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg3e64ff_v_u64m2 (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, const uint64_t *base, size_t *new_vl, size_t vl) { @@ -6212,8 +7912,13 @@ // CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 // CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 8 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP5]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL]], align 8 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg4e64ff_v_u64m2( @@ -6227,8 +7932,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = 
extractvalue { , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP5]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg4e64ff_v_u64m2 (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, const uint64_t *base, size_t *new_vl, size_t vl) { @@ -6242,8 +7952,13 @@ // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 8 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL]], align 8 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg2e64ff_v_u64m4( @@ -6253,8 +7968,13 @@ // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg2e64ff_v_u64m4 (vuint64m4_t *v0, vuint64m4_t *v1, const uint64_t *base, size_t *new_vl, size_t vl) { @@ -6268,8 +7988,13 @@ // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL]], align 4 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg2e32ff_v_f32mf2( @@ -6279,8 +8004,13 @@ // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 -// 
CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 4 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg2e32ff_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, const float *base, size_t *new_vl, size_t vl) { @@ -6296,8 +8026,13 @@ // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 // CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP4]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL]], align 4 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg3e32ff_v_f32mf2( @@ -6309,8 +8044,13 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP4]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL]], align 4 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg3e32ff_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, const float *base, size_t *new_vl, size_t vl) { @@ -6328,8 +8068,13 @@ // CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 // CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP5]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL]], align 4 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg4e32ff_v_f32mf2( @@ -6343,8 +8088,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store i64 [[TMP5]], 
i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP5]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL]], align 4 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg4e32ff_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, const float *base, size_t *new_vl, size_t vl) { @@ -6364,8 +8114,13 @@ // CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 4 // CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP6]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL]], align 4 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg5e32ff_v_f32mf2( @@ -6381,8 +8136,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 -// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP6]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL]], align 4 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg5e32ff_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, const float *base, size_t *new_vl, size_t vl) { @@ -6404,8 +8164,13 @@ // CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 5 // CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 -// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP7]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL]], align 4 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg6e32ff_v_f32mf2( @@ -6423,8 +8188,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = 
extractvalue { , , , , , , i64 } [[TMP0]], 6 -// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP7]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL]], align 4 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg6e32ff_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, const float *base, size_t *new_vl, size_t vl) { @@ -6448,8 +8218,13 @@ // CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 6 // CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 -// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP8]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 +// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL]], align 4 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg7e32ff_v_f32mf2( @@ -6469,8 +8244,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 -// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP8]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL]], align 4 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg7e32ff_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, const float *base, size_t *new_vl, size_t vl) { @@ -6496,8 +8276,13 @@ // CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 7 // CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 -// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP9:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP9]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 +// CHECK-RV32-NEXT: store i32 [[TMP10]], i32* [[NEW_VL]], align 4 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg8e32ff_v_f32mf2( @@ -6519,8 +8304,13 @@ // CHECK-RV64-NEXT: store 
[[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 -// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP9]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP10]], i64* [[NEW_VL]], align 4 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg8e32ff_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, const float *base, size_t *new_vl, size_t vl) { @@ -6534,8 +8324,13 @@ // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL]], align 4 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg2e32ff_v_f32m1( @@ -6545,8 +8340,13 @@ // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 4 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg2e32ff_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, const float *base, size_t *new_vl, size_t vl) { @@ -6562,8 +8362,13 @@ // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 // CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP4]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL]], align 4 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: 
@test_vlseg3e32ff_v_f32m1( @@ -6575,8 +8380,13 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP4]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL]], align 4 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg3e32ff_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, const float *base, size_t *new_vl, size_t vl) { @@ -6594,8 +8404,13 @@ // CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 // CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP5]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL]], align 4 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg4e32ff_v_f32m1( @@ -6609,8 +8424,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP5]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL]], align 4 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg4e32ff_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, const float *base, size_t *new_vl, size_t vl) { @@ -6630,8 +8450,13 @@ // CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 4 // CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP6]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL]], align 4 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // 
CHECK-RV64-LABEL: @test_vlseg5e32ff_v_f32m1( @@ -6647,8 +8472,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 -// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP6]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL]], align 4 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg5e32ff_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, const float *base, size_t *new_vl, size_t vl) { @@ -6670,8 +8500,13 @@ // CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 5 // CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 -// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP7]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL]], align 4 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg6e32ff_v_f32m1( @@ -6689,8 +8524,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 -// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP7]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL]], align 4 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg6e32ff_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, const float *base, size_t *new_vl, size_t vl) { @@ -6714,8 +8554,13 @@ // CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 6 // CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 -// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP8]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 +// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* 
[[NEW_VL]], align 4 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg7e32ff_v_f32m1( @@ -6735,8 +8580,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 -// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP8]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL]], align 4 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg7e32ff_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, const float *base, size_t *new_vl, size_t vl) { @@ -6762,8 +8612,13 @@ // CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 7 // CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 -// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP9:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP9]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 +// CHECK-RV32-NEXT: store i32 [[TMP10]], i32* [[NEW_VL]], align 4 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg8e32ff_v_f32m1( @@ -6785,8 +8640,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 -// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP9]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP10]], i64* [[NEW_VL]], align 4 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg8e32ff_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, const float *base, size_t *new_vl, size_t vl) { @@ -6800,8 +8660,13 @@ // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 
[[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL]], align 4 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg2e32ff_v_f32m2( @@ -6811,8 +8676,13 @@ // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 4 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg2e32ff_v_f32m2 (vfloat32m2_t *v0, vfloat32m2_t *v1, const float *base, size_t *new_vl, size_t vl) { @@ -6828,8 +8698,13 @@ // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 // CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP4]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL]], align 4 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg3e32ff_v_f32m2( @@ -6841,8 +8716,13 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP4]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL]], align 4 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg3e32ff_v_f32m2 (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, const float *base, size_t *new_vl, size_t vl) { @@ -6860,8 +8740,13 @@ // CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 // CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP5]], label [[NEWVL_STORE:%.*]], label 
[[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL]], align 4 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg4e32ff_v_f32m2( @@ -6875,8 +8760,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP5]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL]], align 4 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg4e32ff_v_f32m2 (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, const float *base, size_t *new_vl, size_t vl) { @@ -6890,8 +8780,13 @@ // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL]], align 4 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg2e32ff_v_f32m4( @@ -6901,8 +8796,13 @@ // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 4 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg2e32ff_v_f32m4 (vfloat32m4_t *v0, vfloat32m4_t *v1, const float *base, size_t *new_vl, size_t vl) { @@ -6916,8 +8816,13 @@ // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 8 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: 
newvl_store: +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL]], align 8 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg2e64ff_v_f64m1( @@ -6927,8 +8832,13 @@ // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg2e64ff_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, const double *base, size_t *new_vl, size_t vl) { @@ -6944,8 +8854,13 @@ // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 // CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 8 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP4]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL]], align 8 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg3e64ff_v_f64m1( @@ -6957,8 +8872,13 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP4]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg3e64ff_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, const double *base, size_t *new_vl, size_t vl) { @@ -6976,8 +8896,13 @@ // CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 // CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 8 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP5]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP6:%.*]] = 
extractvalue { , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL]], align 8 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg4e64ff_v_f64m1( @@ -6991,8 +8916,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP5]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg4e64ff_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, const double *base, size_t *new_vl, size_t vl) { @@ -7012,8 +8942,13 @@ // CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 4 // CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 8 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP6]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL]], align 8 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg5e64ff_v_f64m1( @@ -7029,8 +8964,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 -// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP6]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg5e64ff_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, const double *base, size_t *new_vl, size_t vl) { @@ -7052,8 +8992,13 @@ // CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 // CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 5 // CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 -// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 8 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP7]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] 
+// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL]], align 8 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg6e64ff_v_f64m1( @@ -7071,8 +9016,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 -// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP7]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg6e64ff_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, const double *base, size_t *new_vl, size_t vl) { @@ -7096,8 +9046,13 @@ // CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 // CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 6 // CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 -// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 8 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP8]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 +// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL]], align 8 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg7e64ff_v_f64m1( @@ -7117,8 +9072,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 -// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP8]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg7e64ff_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, const double *base, size_t *new_vl, size_t vl) { @@ -7144,8 +9104,13 @@ // CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 // CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 7 // CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 -// CHECK-RV32-NEXT: store i32 [[TMP9]], 
i32* [[NEW_VL:%.*]], align 8 +// CHECK-RV32-NEXT: [[TMP9:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP9]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 +// CHECK-RV32-NEXT: store i32 [[TMP10]], i32* [[NEW_VL]], align 8 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg8e64ff_v_f64m1( @@ -7167,8 +9132,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 -// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP9]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP10]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg8e64ff_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, const double *base, size_t *new_vl, size_t vl) { @@ -7182,8 +9152,13 @@ // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 8 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL]], align 8 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg2e64ff_v_f64m2( @@ -7193,8 +9168,13 @@ // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg2e64ff_v_f64m2 (vfloat64m2_t *v0, vfloat64m2_t *v1, const double *base, size_t *new_vl, size_t vl) { @@ -7210,8 +9190,13 @@ // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 // CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = 
extractvalue { , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 8 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP4]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL]], align 8 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg3e64ff_v_f64m2( @@ -7223,8 +9208,13 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP4]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg3e64ff_v_f64m2 (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, const double *base, size_t *new_vl, size_t vl) { @@ -7242,8 +9232,13 @@ // CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 // CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 8 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP5]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL]], align 8 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg4e64ff_v_f64m2( @@ -7257,8 +9252,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP5]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg4e64ff_v_f64m2 (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, const double *base, size_t *new_vl, size_t vl) { @@ -7272,8 +9272,13 @@ // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = 
extractvalue { , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 8 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL]], align 8 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg2e64ff_v_f64m4( @@ -7283,8 +9288,13 @@ // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg2e64ff_v_f64m4 (vfloat64m4_t *v0, vfloat64m4_t *v1, const double *base, size_t *new_vl, size_t vl) { @@ -7298,8 +9308,13 @@ // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 2 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL]], align 2 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg2e16ff_v_f16mf4( @@ -7309,8 +9324,13 @@ // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg2e16ff_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, const _Float16 *base, size_t *new_vl, size_t vl) { @@ -7326,8 +9346,13 @@ // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 // CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* 
[[NEW_VL:%.*]], align 2 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP4]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL]], align 2 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg3e16ff_v_f16mf4( @@ -7339,8 +9364,13 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP4]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg3e16ff_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, const _Float16 *base, size_t *new_vl, size_t vl) { @@ -7358,8 +9388,13 @@ // CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 // CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 2 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP5]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL]], align 2 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg4e16ff_v_f16mf4( @@ -7373,8 +9408,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP5]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg4e16ff_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, const _Float16 *base, size_t *new_vl, size_t vl) { @@ -7394,8 +9434,13 @@ // CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 4 // CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store 
i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 2 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP6]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL]], align 2 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg5e16ff_v_f16mf4( @@ -7411,8 +9456,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 -// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP6]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg5e16ff_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, const _Float16 *base, size_t *new_vl, size_t vl) { @@ -7434,8 +9484,13 @@ // CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 5 // CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 -// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 2 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP7]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL]], align 2 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg6e16ff_v_f16mf4( @@ -7453,8 +9508,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 -// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP7]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg6e16ff_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, const _Float16 *base, size_t *new_vl, size_t vl) { @@ -7478,8 +9538,13 @@ // CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 6 // CHECK-RV32-NEXT: store 
[[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 -// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 2 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP8]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 +// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL]], align 2 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg7e16ff_v_f16mf4( @@ -7499,8 +9564,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 -// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP8]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg7e16ff_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, const _Float16 *base, size_t *new_vl, size_t vl) { @@ -7526,8 +9596,13 @@ // CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 7 // CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 -// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 2 +// CHECK-RV32-NEXT: [[TMP9:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP9]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 +// CHECK-RV32-NEXT: store i32 [[TMP10]], i32* [[NEW_VL]], align 2 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg8e16ff_v_f16mf4( @@ -7549,8 +9624,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 -// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP9]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP10]], i64* [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg8e16ff_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t 
*v6, vfloat16mf4_t *v7, const _Float16 *base, size_t *new_vl, size_t vl) { @@ -7564,8 +9644,13 @@ // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 2 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL]], align 2 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg2e16ff_v_f16mf2( @@ -7575,8 +9660,13 @@ // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg2e16ff_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, const _Float16 *base, size_t *new_vl, size_t vl) { @@ -7592,8 +9682,13 @@ // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 // CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 2 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP4]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL]], align 2 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg3e16ff_v_f16mf2( @@ -7605,8 +9700,13 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP4]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg3e16ff_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, const _Float16 *base, 
size_t *new_vl, size_t vl) { @@ -7624,8 +9724,13 @@ // CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 // CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 2 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP5]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL]], align 2 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg4e16ff_v_f16mf2( @@ -7639,8 +9744,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP5]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg4e16ff_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, const _Float16 *base, size_t *new_vl, size_t vl) { @@ -7660,8 +9770,13 @@ // CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 4 // CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 2 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP6]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL]], align 2 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg5e16ff_v_f16mf2( @@ -7677,8 +9792,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 -// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP6]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg5e16ff_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, 
vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, const _Float16 *base, size_t *new_vl, size_t vl) { @@ -7700,8 +9820,13 @@ // CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 5 // CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 -// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 2 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP7]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL]], align 2 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg6e16ff_v_f16mf2( @@ -7719,8 +9844,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 -// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP7]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg6e16ff_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, const _Float16 *base, size_t *new_vl, size_t vl) { @@ -7744,8 +9874,13 @@ // CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 6 // CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 -// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 2 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP8]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 +// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL]], align 2 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg7e16ff_v_f16mf2( @@ -7765,8 +9900,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 -// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP8]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL]], align 2 +// 
CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg7e16ff_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, const _Float16 *base, size_t *new_vl, size_t vl) { @@ -7792,8 +9932,13 @@ // CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 7 // CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 -// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 2 +// CHECK-RV32-NEXT: [[TMP9:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP9]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 +// CHECK-RV32-NEXT: store i32 [[TMP10]], i32* [[NEW_VL]], align 2 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg8e16ff_v_f16mf2( @@ -7815,8 +9960,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 -// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP9]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP10]], i64* [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg8e16ff_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, const _Float16 *base, size_t *new_vl, size_t vl) { @@ -7830,8 +9980,13 @@ // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 2 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL]], align 2 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg2e16ff_v_f16m1( @@ -7841,8 +9996,13 @@ // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label 
[[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg2e16ff_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, const _Float16 *base, size_t *new_vl, size_t vl) { @@ -7858,8 +10018,13 @@ // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 // CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 2 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP4]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL]], align 2 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg3e16ff_v_f16m1( @@ -7871,8 +10036,13 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP4]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg3e16ff_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, const _Float16 *base, size_t *new_vl, size_t vl) { @@ -7890,8 +10060,13 @@ // CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 // CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 2 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP5]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL]], align 2 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg4e16ff_v_f16m1( @@ -7905,8 +10080,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP5]], label [[NEWVL_STORE:%.*]], label 
[[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg4e16ff_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, const _Float16 *base, size_t *new_vl, size_t vl) { @@ -7926,8 +10106,13 @@ // CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 4 // CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 2 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP6]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL]], align 2 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg5e16ff_v_f16m1( @@ -7943,8 +10128,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 -// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP6]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg5e16ff_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, const _Float16 *base, size_t *new_vl, size_t vl) { @@ -7966,8 +10156,13 @@ // CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 5 // CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 -// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 2 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP7]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL]], align 2 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg6e16ff_v_f16m1( @@ -7985,8 +10180,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 -// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = icmp ne i64* 
[[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP7]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg6e16ff_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, const _Float16 *base, size_t *new_vl, size_t vl) { @@ -8010,8 +10210,13 @@ // CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 6 // CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 -// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 2 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP8]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 +// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL]], align 2 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg7e16ff_v_f16m1( @@ -8031,8 +10236,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 -// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP8]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg7e16ff_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, const _Float16 *base, size_t *new_vl, size_t vl) { @@ -8058,8 +10268,13 @@ // CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 7 // CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 -// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 2 +// CHECK-RV32-NEXT: [[TMP9:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP9]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 +// CHECK-RV32-NEXT: store i32 [[TMP10]], i32* [[NEW_VL]], align 2 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg8e16ff_v_f16m1( @@ -8081,8 +10296,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 
2 -// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 -// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP9]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP10]], i64* [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg8e16ff_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, const _Float16 *base, size_t *new_vl, size_t vl) { @@ -8096,8 +10316,13 @@ // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 2 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL]], align 2 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg2e16ff_v_f16m2( @@ -8107,8 +10332,13 @@ // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg2e16ff_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, const _Float16 *base, size_t *new_vl, size_t vl) { @@ -8124,8 +10354,13 @@ // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 // CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 2 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP4]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL]], align 2 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg3e16ff_v_f16m2( @@ -8137,8 +10372,13 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 
// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP4]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg3e16ff_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, const _Float16 *base, size_t *new_vl, size_t vl) { @@ -8156,8 +10396,13 @@ // CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 // CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 2 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP5]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL]], align 2 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg4e16ff_v_f16m2( @@ -8171,8 +10416,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP5]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg4e16ff_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, const _Float16 *base, size_t *new_vl, size_t vl) { @@ -8186,8 +10436,13 @@ // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 2 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL]], align 2 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg2e16ff_v_f16m4( @@ -8197,8 +10452,13 @@ // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 // 
CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null
+// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]]
+// CHECK-RV64: newvl_store:
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 2
+// CHECK-RV64-NEXT: br label [[NEWVL_END]]
+// CHECK-RV64: newvl_end:
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e16ff_v_f16m4 (vfloat16m4_t *v0, vfloat16m4_t *v1, const _Float16 *base, size_t *new_vl, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vlsegff_mask.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vlsegff_mask.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vlsegff_mask.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vlsegff_mask.c
@@ -20,8 +20,13 @@
// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1
// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 1
+// CHECK-RV32-NEXT: [[TMP3:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null
+// CHECK-RV32-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]]
+// CHECK-RV32: newvl_store:
+// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , i32 } [[TMP0]], 2
+// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL]], align 1
+// CHECK-RV32-NEXT: br label [[NEWVL_END]]
+// CHECK-RV32: newvl_end:
// CHECK-RV32-NEXT: ret void
//
// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_i8mf8_m(
@@ -31,8 +36,13 @@
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1
// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null
+// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]]
+// CHECK-RV64: newvl_store:
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 1
+// CHECK-RV64-NEXT: br label [[NEWVL_END]]
+// CHECK-RV64: newvl_end:
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e8ff_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, size_t *new_vl, size_t vl) {
@@ -48,8 +58,13 @@
// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2
// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3
-// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 1
+// CHECK-RV32-NEXT: [[TMP4:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null
+// CHECK-RV32-NEXT: br i1 [[TMP4]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]]
+// CHECK-RV32: newvl_store:
+// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3
+// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL]], align 1
+// CHECK-RV32-NEXT: br label [[NEWVL_END]]
+// CHECK-RV32: newvl_end:
// CHECK-RV32-NEXT: ret
void // // CHECK-RV64-LABEL: @test_vlseg3e8ff_v_i8mf8_m( @@ -61,8 +76,13 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP4]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL]], align 1 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg3e8ff_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, size_t *new_vl, size_t vl) { @@ -80,8 +100,13 @@ // CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 // CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 1 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP5]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL]], align 1 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg4e8ff_v_i8mf8_m( @@ -95,8 +120,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP5]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL]], align 1 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg4e8ff_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, size_t *new_vl, size_t vl) { @@ -116,8 +146,13 @@ // CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 4 // CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 1 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP6]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , 
i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL]], align 1 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg5e8ff_v_i8mf8_m( @@ -133,8 +168,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 -// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP6]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL]], align 1 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg5e8ff_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, size_t *new_vl, size_t vl) { @@ -156,8 +196,13 @@ // CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 5 // CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 -// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 1 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP7]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL]], align 1 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg6e8ff_v_i8mf8_m( @@ -175,8 +220,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 -// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP7]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL]], align 1 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg6e8ff_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, size_t *new_vl, size_t vl) { @@ -200,8 +250,13 @@ // CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 6 // CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// 
CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 -// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 1 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP8]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 +// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL]], align 1 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg7e8ff_v_i8mf8_m( @@ -221,8 +276,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 -// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP8]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL]], align 1 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg7e8ff_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, size_t *new_vl, size_t vl) { @@ -248,8 +308,13 @@ // CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 7 // CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 -// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 1 +// CHECK-RV32-NEXT: [[TMP9:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP9]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 +// CHECK-RV32-NEXT: store i32 [[TMP10]], i32* [[NEW_VL]], align 1 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg8e8ff_v_i8mf8_m( @@ -271,8 +336,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 -// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP9]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP10]], i64* [[NEW_VL]], align 1 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg8e8ff_v_i8mf8_m (vint8mf8_t *v0, 
vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, size_t *new_vl, size_t vl) { @@ -286,8 +356,13 @@ // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 1 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL]], align 1 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg2e8ff_v_i8mf4_m( @@ -297,8 +372,13 @@ // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 1 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg2e8ff_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, size_t *new_vl, size_t vl) { @@ -314,8 +394,13 @@ // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 // CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 1 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP4]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL]], align 1 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg3e8ff_v_i8mf4_m( @@ -327,8 +412,13 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP4]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: 
[[TMP5:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL]], align 1 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg3e8ff_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, size_t *new_vl, size_t vl) { @@ -346,8 +436,13 @@ // CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 // CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 1 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP5]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL]], align 1 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg4e8ff_v_i8mf4_m( @@ -361,8 +456,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP5]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL]], align 1 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg4e8ff_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, size_t *new_vl, size_t vl) { @@ -382,8 +482,13 @@ // CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 4 // CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 1 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP6]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL]], align 1 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg5e8ff_v_i8mf4_m( @@ -399,8 +504,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 -// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 1 +// 
CHECK-RV64-NEXT: [[TMP6:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP6]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL]], align 1 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg5e8ff_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, size_t *new_vl, size_t vl) { @@ -422,8 +532,13 @@ // CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 5 // CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 -// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 1 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP7]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL]], align 1 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg6e8ff_v_i8mf4_m( @@ -441,8 +556,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 -// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP7]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL]], align 1 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg6e8ff_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, size_t *new_vl, size_t vl) { @@ -466,8 +586,13 @@ // CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 6 // CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 -// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 1 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP8]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 +// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL]], align 1 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: 
@test_vlseg7e8ff_v_i8mf4_m( @@ -487,8 +612,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 -// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP8]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL]], align 1 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg7e8ff_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, size_t *new_vl, size_t vl) { @@ -514,8 +644,13 @@ // CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 7 // CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 -// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 1 +// CHECK-RV32-NEXT: [[TMP9:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP9]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 +// CHECK-RV32-NEXT: store i32 [[TMP10]], i32* [[NEW_VL]], align 1 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg8e8ff_v_i8mf4_m( @@ -537,8 +672,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 -// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP9]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP10]], i64* [[NEW_VL]], align 1 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg8e8ff_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, size_t *new_vl, size_t vl) { @@ -552,8 +692,13 @@ // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = 
extractvalue { , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 1 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL]], align 1 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg2e8ff_v_i8mf2_m( @@ -563,8 +708,13 @@ // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 1 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg2e8ff_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, size_t *new_vl, size_t vl) { @@ -580,8 +730,13 @@ // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 // CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 1 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP4]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL]], align 1 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg3e8ff_v_i8mf2_m( @@ -593,8 +748,13 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP4]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL]], align 1 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg3e8ff_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, size_t *new_vl, size_t vl) { @@ -612,8 +772,13 @@ // CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 // CHECK-RV32-NEXT: 
store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 1 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP5]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL]], align 1 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg4e8ff_v_i8mf2_m( @@ -627,8 +792,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP5]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL]], align 1 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg4e8ff_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, size_t *new_vl, size_t vl) { @@ -648,8 +818,13 @@ // CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 4 // CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 1 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP6]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL]], align 1 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg5e8ff_v_i8mf2_m( @@ -665,8 +840,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 -// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP6]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL]], align 1 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg5e8ff_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t 
maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, size_t *new_vl, size_t vl) { @@ -688,8 +868,13 @@ // CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 5 // CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 -// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 1 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP7]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL]], align 1 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg6e8ff_v_i8mf2_m( @@ -707,8 +892,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 -// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP7]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL]], align 1 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg6e8ff_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, size_t *new_vl, size_t vl) { @@ -732,8 +922,13 @@ // CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 6 // CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 -// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 1 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP8]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 +// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL]], align 1 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg7e8ff_v_i8mf2_m( @@ -753,8 +948,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 -// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP8]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP9:%.*]] = 
extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL]], align 1 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg7e8ff_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, size_t *new_vl, size_t vl) { @@ -780,8 +980,13 @@ // CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 7 // CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 -// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 1 +// CHECK-RV32-NEXT: [[TMP9:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP9]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 +// CHECK-RV32-NEXT: store i32 [[TMP10]], i32* [[NEW_VL]], align 1 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg8e8ff_v_i8mf2_m( @@ -803,8 +1008,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 -// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP9]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP10]], i64* [[NEW_VL]], align 1 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg8e8ff_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, size_t *new_vl, size_t vl) { @@ -818,8 +1028,13 @@ // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 1 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL]], align 1 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg2e8ff_v_i8m1_m( @@ -829,8 +1044,13 @@ // CHECK-RV64-NEXT: 
store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 1 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg2e8ff_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, size_t *new_vl, size_t vl) { @@ -846,8 +1066,13 @@ // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 // CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 1 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP4]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL]], align 1 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg3e8ff_v_i8m1_m( @@ -859,8 +1084,13 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP4]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL]], align 1 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg3e8ff_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, size_t *new_vl, size_t vl) { @@ -878,8 +1108,13 @@ // CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 // CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 1 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP5]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL]], align 1 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: 
@test_vlseg4e8ff_v_i8m1_m( @@ -893,8 +1128,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP5]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL]], align 1 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg4e8ff_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, size_t *new_vl, size_t vl) { @@ -914,8 +1154,13 @@ // CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 4 // CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 1 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP6]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL]], align 1 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg5e8ff_v_i8m1_m( @@ -931,8 +1176,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 -// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP6]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL]], align 1 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg5e8ff_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, size_t *new_vl, size_t vl) { @@ -954,8 +1204,13 @@ // CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 5 // CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 -// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 1 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP7]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// 
CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL]], align 1 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg6e8ff_v_i8m1_m( @@ -973,8 +1228,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 -// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP7]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL]], align 1 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg6e8ff_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, size_t *new_vl, size_t vl) { @@ -998,8 +1258,13 @@ // CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 6 // CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 -// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 1 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP8]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 +// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL]], align 1 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg7e8ff_v_i8m1_m( @@ -1019,8 +1284,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 -// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP8]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL]], align 1 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg7e8ff_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, size_t *new_vl, size_t vl) { @@ -1046,8 +1316,13 @@ // CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV32-NEXT: 
[[TMP8:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 7 // CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 -// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 1 +// CHECK-RV32-NEXT: [[TMP9:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP9]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 +// CHECK-RV32-NEXT: store i32 [[TMP10]], i32* [[NEW_VL]], align 1 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg8e8ff_v_i8m1_m( @@ -1069,8 +1344,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 -// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP9]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP10]], i64* [[NEW_VL]], align 1 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg8e8ff_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, size_t *new_vl, size_t vl) { @@ -1084,8 +1364,13 @@ // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 1 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL]], align 1 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg2e8ff_v_i8m2_m( @@ -1095,8 +1380,13 @@ // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 1 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: 
newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg2e8ff_v_i8m2_m (vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, const int8_t *base, size_t *new_vl, size_t vl) { @@ -1112,8 +1402,13 @@ // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 // CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 1 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP4]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL]], align 1 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg3e8ff_v_i8m2_m( @@ -1125,8 +1420,13 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP4]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL]], align 1 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg3e8ff_v_i8m2_m (vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, const int8_t *base, size_t *new_vl, size_t vl) { @@ -1144,8 +1444,13 @@ // CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 // CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 1 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP5]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL]], align 1 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg4e8ff_v_i8m2_m( @@ -1159,8 +1464,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP5]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP6]], 
i64* [[NEW_VL]], align 1 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg4e8ff_v_i8m2_m (vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, vint8m2_t maskedoff3, const int8_t *base, size_t *new_vl, size_t vl) { @@ -1174,8 +1484,13 @@ // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 1 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL]], align 1 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg2e8ff_v_i8m4_m( @@ -1185,8 +1500,13 @@ // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 1 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg2e8ff_v_i8m4_m (vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, vint8m4_t maskedoff0, vint8m4_t maskedoff1, const int8_t *base, size_t *new_vl, size_t vl) { @@ -1200,8 +1520,13 @@ // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 2 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL]], align 2 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg2e16ff_v_i16mf4_m( @@ -1211,8 +1536,13 @@ // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: 
[[TMP4:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg2e16ff_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, size_t *new_vl, size_t vl) { @@ -1228,8 +1558,13 @@ // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 // CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 2 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP4]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL]], align 2 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg3e16ff_v_i16mf4_m( @@ -1241,8 +1576,13 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP4]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg3e16ff_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, size_t *new_vl, size_t vl) { @@ -1260,8 +1600,13 @@ // CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 // CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 2 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP5]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL]], align 2 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg4e16ff_v_i16mf4_m( @@ -1275,8 +1620,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: 
br i1 [[TMP5]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg4e16ff_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, size_t *new_vl, size_t vl) { @@ -1296,8 +1646,13 @@ // CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 4 // CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 2 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP6]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL]], align 2 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg5e16ff_v_i16mf4_m( @@ -1313,8 +1668,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 -// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP6]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg5e16ff_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, size_t *new_vl, size_t vl) { @@ -1336,8 +1696,13 @@ // CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 5 // CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 -// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 2 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP7]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL]], align 2 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg6e16ff_v_i16mf4_m( @@ -1355,8 +1720,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , 
, , , , , i64 } [[TMP0]], 5 // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 -// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP7]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg6e16ff_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, size_t *new_vl, size_t vl) { @@ -1380,8 +1750,13 @@ // CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 6 // CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 -// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 2 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP8]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 +// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL]], align 2 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg7e16ff_v_i16mf4_m( @@ -1401,8 +1776,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 -// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP8]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg7e16ff_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, size_t *new_vl, size_t vl) { @@ -1428,8 +1808,13 @@ // CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 7 // CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 -// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 2 +// CHECK-RV32-NEXT: [[TMP9:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: 
br i1 [[TMP9]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 +// CHECK-RV32-NEXT: store i32 [[TMP10]], i32* [[NEW_VL]], align 2 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg8e16ff_v_i16mf4_m( @@ -1451,8 +1836,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 -// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP9]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP10]], i64* [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg8e16ff_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, size_t *new_vl, size_t vl) { @@ -1466,8 +1856,13 @@ // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 2 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL]], align 2 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg2e16ff_v_i16mf2_m( @@ -1477,8 +1872,13 @@ // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg2e16ff_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, size_t *new_vl, size_t vl) { @@ -1494,8 +1894,13 @@ // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV32-NEXT: 
[[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 // CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 2 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP4]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL]], align 2 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg3e16ff_v_i16mf2_m( @@ -1507,8 +1912,13 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP4]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg3e16ff_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, size_t *new_vl, size_t vl) { @@ -1526,8 +1936,13 @@ // CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 // CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 2 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP5]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL]], align 2 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg4e16ff_v_i16mf2_m( @@ -1541,8 +1956,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP5]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg4e16ff_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t 
maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, size_t *new_vl, size_t vl) { @@ -1562,8 +1982,13 @@ // CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 4 // CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 2 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP6]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL]], align 2 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg5e16ff_v_i16mf2_m( @@ -1579,8 +2004,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 -// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP6]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg5e16ff_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, size_t *new_vl, size_t vl) { @@ -1602,8 +2032,13 @@ // CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 5 // CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 -// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 2 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP7]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL]], align 2 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg6e16ff_v_i16mf2_m( @@ -1621,8 +2056,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 -// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP7]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , i64 } 
[[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg6e16ff_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, size_t *new_vl, size_t vl) { @@ -1646,8 +2086,13 @@ // CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 6 // CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 -// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 2 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP8]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 +// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL]], align 2 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg7e16ff_v_i16mf2_m( @@ -1667,8 +2112,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 -// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP8]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg7e16ff_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, size_t *new_vl, size_t vl) { @@ -1694,8 +2144,13 @@ // CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 7 // CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 -// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 2 +// CHECK-RV32-NEXT: [[TMP9:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP9]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 +// CHECK-RV32-NEXT: store i32 [[TMP10]], i32* [[NEW_VL]], align 2 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg8e16ff_v_i16mf2_m( @@ -1717,8 +2172,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // 
CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 -// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP9]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP10]], i64* [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg8e16ff_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, size_t *new_vl, size_t vl) { @@ -1732,8 +2192,13 @@ // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 2 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL]], align 2 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg2e16ff_v_i16m1_m( @@ -1743,8 +2208,13 @@ // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg2e16ff_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, size_t *new_vl, size_t vl) { @@ -1760,8 +2230,13 @@ // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 // CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 2 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP4]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { 
, , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL]], align 2 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg3e16ff_v_i16m1_m( @@ -1773,8 +2248,13 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP4]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg3e16ff_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, size_t *new_vl, size_t vl) { @@ -1792,8 +2272,13 @@ // CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 // CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 2 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP5]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL]], align 2 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg4e16ff_v_i16m1_m( @@ -1807,8 +2292,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP5]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg4e16ff_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, size_t *new_vl, size_t vl) { @@ -1828,8 +2318,13 @@ // CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 4 // CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 2 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = icmp ne 
i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP6]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL]], align 2 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg5e16ff_v_i16m1_m( @@ -1845,8 +2340,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 -// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP6]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg5e16ff_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, size_t *new_vl, size_t vl) { @@ -1868,8 +2368,13 @@ // CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 5 // CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 -// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 2 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP7]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL]], align 2 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg6e16ff_v_i16m1_m( @@ -1887,8 +2392,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 -// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP7]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg6e16ff_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, size_t *new_vl, size_t vl) { @@ -1912,8 +2422,13 
@@ // CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 6 // CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 -// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 2 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP8]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 +// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL]], align 2 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg7e16ff_v_i16m1_m( @@ -1933,8 +2448,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 -// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP8]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg7e16ff_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, size_t *new_vl, size_t vl) { @@ -1960,8 +2480,13 @@ // CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 7 // CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 -// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 2 +// CHECK-RV32-NEXT: [[TMP9:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP9]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 +// CHECK-RV32-NEXT: store i32 [[TMP10]], i32* [[NEW_VL]], align 2 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg8e16ff_v_i16m1_m( @@ -1983,8 +2508,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 -// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP9]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// 
CHECK-RV64-NEXT: store i64 [[TMP10]], i64* [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg8e16ff_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, size_t *new_vl, size_t vl) { @@ -1998,8 +2528,13 @@ // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 2 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL]], align 2 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg2e16ff_v_i16m2_m( @@ -2009,8 +2544,13 @@ // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg2e16ff_v_i16m2_m (vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, size_t *new_vl, size_t vl) { @@ -2026,8 +2566,13 @@ // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 // CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 2 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP4]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL]], align 2 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg3e16ff_v_i16m2_m( @@ -2039,8 +2584,13 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* 
[[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP4]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg3e16ff_v_i16m2_m (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, size_t *new_vl, size_t vl) { @@ -2058,8 +2608,13 @@ // CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 // CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 2 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP5]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL]], align 2 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg4e16ff_v_i16m2_m( @@ -2073,8 +2628,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP5]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg4e16ff_v_i16m2_m (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, size_t *new_vl, size_t vl) { @@ -2088,8 +2648,13 @@ // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 2 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL]], align 2 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg2e16ff_v_i16m4_m( @@ -2099,8 +2664,13 @@ // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 // 
CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg2e16ff_v_i16m4_m (vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, size_t *new_vl, size_t vl) { @@ -2114,8 +2684,13 @@ // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL]], align 4 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg2e32ff_v_i32mf2_m( @@ -2125,8 +2700,13 @@ // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 4 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg2e32ff_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, size_t *new_vl, size_t vl) { @@ -2142,8 +2722,13 @@ // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 // CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP4]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL]], align 4 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg3e32ff_v_i32mf2_m( @@ -2155,8 +2740,13 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: 
[[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP4]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL]], align 4 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg3e32ff_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, size_t *new_vl, size_t vl) { @@ -2174,8 +2764,13 @@ // CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 // CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP5]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL]], align 4 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg4e32ff_v_i32mf2_m( @@ -2189,8 +2784,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP5]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL]], align 4 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg4e32ff_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, size_t *new_vl, size_t vl) { @@ -2210,8 +2810,13 @@ // CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 4 // CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP6]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL]], align 4 +// CHECK-RV32-NEXT: br label 
[[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg5e32ff_v_i32mf2_m( @@ -2227,8 +2832,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 -// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP6]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL]], align 4 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg5e32ff_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, size_t *new_vl, size_t vl) { @@ -2250,8 +2860,13 @@ // CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 5 // CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 -// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP7]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL]], align 4 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg6e32ff_v_i32mf2_m( @@ -2269,8 +2884,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 -// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP7]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL]], align 4 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg6e32ff_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, size_t *new_vl, size_t vl) { @@ -2294,8 +2914,13 @@ // CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 6 // CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 -// 
CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP8]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 +// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL]], align 4 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg7e32ff_v_i32mf2_m( @@ -2315,8 +2940,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 -// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP8]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL]], align 4 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg7e32ff_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, size_t *new_vl, size_t vl) { @@ -2342,8 +2972,13 @@ // CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 7 // CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 -// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP9:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP9]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 +// CHECK-RV32-NEXT: store i32 [[TMP10]], i32* [[NEW_VL]], align 4 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg8e32ff_v_i32mf2_m( @@ -2365,8 +3000,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 -// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP9]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP10]], i64* [[NEW_VL]], align 4 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg8e32ff_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, 
vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, size_t *new_vl, size_t vl) { @@ -2380,8 +3020,13 @@ // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL]], align 4 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg2e32ff_v_i32m1_m( @@ -2391,8 +3036,13 @@ // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 4 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg2e32ff_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, size_t *new_vl, size_t vl) { @@ -2408,8 +3058,13 @@ // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 // CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP4]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL]], align 4 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg3e32ff_v_i32m1_m( @@ -2421,8 +3076,13 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP4]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { 
, , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL]], align 4 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg3e32ff_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, size_t *new_vl, size_t vl) { @@ -2440,8 +3100,13 @@ // CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 // CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP5]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL]], align 4 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg4e32ff_v_i32m1_m( @@ -2455,8 +3120,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP5]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL]], align 4 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg4e32ff_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, size_t *new_vl, size_t vl) { @@ -2476,8 +3146,13 @@ // CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 4 // CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP6]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL]], align 4 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg5e32ff_v_i32m1_m( @@ -2493,8 +3168,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 -// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] 
= icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP6]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL]], align 4 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg5e32ff_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, size_t *new_vl, size_t vl) { @@ -2516,8 +3196,13 @@ // CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 5 // CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 -// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP7]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL]], align 4 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg6e32ff_v_i32m1_m( @@ -2535,8 +3220,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 -// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP7]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL]], align 4 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg6e32ff_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, size_t *new_vl, size_t vl) { @@ -2560,8 +3250,13 @@ // CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 6 // CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 -// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP8]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 +// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL]], align 4 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg7e32ff_v_i32m1_m( 
@@ -2581,8 +3276,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 -// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP8]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL]], align 4 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg7e32ff_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, size_t *new_vl, size_t vl) { @@ -2608,8 +3308,13 @@ // CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 7 // CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 -// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP9:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP9]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 +// CHECK-RV32-NEXT: store i32 [[TMP10]], i32* [[NEW_VL]], align 4 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg8e32ff_v_i32m1_m( @@ -2631,8 +3336,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 -// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP9]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP10]], i64* [[NEW_VL]], align 4 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg8e32ff_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, size_t *new_vl, size_t vl) { @@ -2646,8 +3356,13 @@ // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , 
i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL]], align 4 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg2e32ff_v_i32m2_m( @@ -2657,8 +3372,13 @@ // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 4 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg2e32ff_v_i32m2_m (vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, size_t *new_vl, size_t vl) { @@ -2674,8 +3394,13 @@ // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 // CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP4]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL]], align 4 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg3e32ff_v_i32m2_m( @@ -2687,8 +3412,13 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP4]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL]], align 4 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg3e32ff_v_i32m2_m (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, size_t *new_vl, size_t vl) { @@ -2706,8 +3436,13 @@ // CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 // CHECK-RV32-NEXT: store 
[[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP5]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL]], align 4 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg4e32ff_v_i32m2_m( @@ -2721,8 +3456,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP5]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL]], align 4 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg4e32ff_v_i32m2_m (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, size_t *new_vl, size_t vl) { @@ -2736,8 +3476,13 @@ // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL]], align 4 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg2e32ff_v_i32m4_m( @@ -2747,8 +3492,13 @@ // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 4 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg2e32ff_v_i32m4_m (vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, size_t *new_vl, size_t vl) { @@ -2762,8 +3512,13 @@ // CHECK-RV32-NEXT: store 
[[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 8 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL]], align 8 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg2e64ff_v_i64m1_m( @@ -2773,8 +3528,13 @@ // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg2e64ff_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, size_t *new_vl, size_t vl) { @@ -2790,8 +3550,13 @@ // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 // CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 8 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP4]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL]], align 8 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg3e64ff_v_i64m1_m( @@ -2803,8 +3568,13 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP4]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg3e64ff_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t 
*base, size_t *new_vl, size_t vl) { @@ -2822,8 +3592,13 @@ // CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 // CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 8 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP5]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL]], align 8 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg4e64ff_v_i64m1_m( @@ -2837,8 +3612,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP5]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg4e64ff_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, size_t *new_vl, size_t vl) { @@ -2858,8 +3638,13 @@ // CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 4 // CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 8 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP6]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL]], align 8 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg5e64ff_v_i64m1_m( @@ -2875,8 +3660,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 -// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP6]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // 
CHECK-RV64-NEXT: ret void // void test_vlseg5e64ff_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, size_t *new_vl, size_t vl) { @@ -2898,8 +3688,13 @@ // CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 // CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 5 // CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 -// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 8 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP7]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL]], align 8 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg6e64ff_v_i64m1_m( @@ -2917,8 +3712,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 -// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP7]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg6e64ff_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, size_t *new_vl, size_t vl) { @@ -2942,8 +3742,13 @@ // CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 // CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 6 // CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 -// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 8 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP8]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 +// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL]], align 8 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg7e64ff_v_i64m1_m( @@ -2963,8 +3768,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 -// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 
8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP8]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg7e64ff_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, size_t *new_vl, size_t vl) { @@ -2990,8 +3800,13 @@ // CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 // CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 7 // CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 -// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 8 +// CHECK-RV32-NEXT: [[TMP9:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP9]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 +// CHECK-RV32-NEXT: store i32 [[TMP10]], i32* [[NEW_VL]], align 8 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg8e64ff_v_i64m1_m( @@ -3013,8 +3828,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 -// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP9]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP10]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg8e64ff_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, size_t *new_vl, size_t vl) { @@ -3028,8 +3848,13 @@ // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 8 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: 
store i32 [[TMP4]], i32* [[NEW_VL]], align 8 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg2e64ff_v_i64m2_m( @@ -3039,8 +3864,13 @@ // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg2e64ff_v_i64m2_m (vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, size_t *new_vl, size_t vl) { @@ -3056,8 +3886,13 @@ // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 // CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 8 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP4]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL]], align 8 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg3e64ff_v_i64m2_m( @@ -3069,8 +3904,13 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP4]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg3e64ff_v_i64m2_m (vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, size_t *new_vl, size_t vl) { @@ -3088,8 +3928,13 @@ // CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 // CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 8 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP5]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// 
CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL]], align 8 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg4e64ff_v_i64m2_m( @@ -3103,8 +3948,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP5]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg4e64ff_v_i64m2_m (vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, size_t *new_vl, size_t vl) { @@ -3118,8 +3968,13 @@ // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 8 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL]], align 8 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg2e64ff_v_i64m4_m( @@ -3129,8 +3984,13 @@ // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg2e64ff_v_i64m4_m (vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, size_t *new_vl, size_t vl) { @@ -3144,8 +4004,13 @@ // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 1 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = icmp ne i32* [[NEW_VL:%.*]], 
null +// CHECK-RV32-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL]], align 1 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg2e8ff_v_u8mf8_m( @@ -3155,8 +4020,13 @@ // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 1 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg2e8ff_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, size_t *new_vl, size_t vl) { @@ -3172,8 +4042,13 @@ // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 // CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 1 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP4]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL]], align 1 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg3e8ff_v_u8mf8_m( @@ -3185,8 +4060,13 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP4]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL]], align 1 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg3e8ff_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, size_t *new_vl, size_t vl) { @@ -3204,8 +4084,13 @@ // CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 // CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store i32 
[[TMP5]], i32* [[NEW_VL:%.*]], align 1 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP5]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL]], align 1 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg4e8ff_v_u8mf8_m( @@ -3219,8 +4104,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP5]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL]], align 1 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg4e8ff_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, size_t *new_vl, size_t vl) { @@ -3240,8 +4130,13 @@ // CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 4 // CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 1 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP6]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL]], align 1 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg5e8ff_v_u8mf8_m( @@ -3257,8 +4152,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 -// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP6]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL]], align 1 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg5e8ff_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, size_t *new_vl, size_t vl) { @@ -3280,8 +4180,13 @@ 
// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 5 // CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 -// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 1 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP7]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL]], align 1 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg6e8ff_v_u8mf8_m( @@ -3299,8 +4204,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 -// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP7]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL]], align 1 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg6e8ff_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, size_t *new_vl, size_t vl) { @@ -3324,8 +4234,13 @@ // CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 6 // CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 -// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 1 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP8]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 +// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL]], align 1 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg7e8ff_v_u8mf8_m( @@ -3345,8 +4260,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 -// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP8]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL]], align 
1 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg7e8ff_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, size_t *new_vl, size_t vl) { @@ -3372,8 +4292,13 @@ // CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 7 // CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 -// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 1 +// CHECK-RV32-NEXT: [[TMP9:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP9]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 +// CHECK-RV32-NEXT: store i32 [[TMP10]], i32* [[NEW_VL]], align 1 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg8e8ff_v_u8mf8_m( @@ -3395,8 +4320,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 -// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP9]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP10]], i64* [[NEW_VL]], align 1 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg8e8ff_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, size_t *new_vl, size_t vl) { @@ -3410,8 +4340,13 @@ // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 1 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL]], align 1 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg2e8ff_v_u8mf4_m( @@ -3421,8 +4356,13 @@ // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = 
extractvalue { , , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 1 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg2e8ff_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, size_t *new_vl, size_t vl) { @@ -3438,8 +4378,13 @@ // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 // CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 1 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP4]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL]], align 1 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg3e8ff_v_u8mf4_m( @@ -3451,8 +4396,13 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP4]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL]], align 1 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg3e8ff_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, size_t *new_vl, size_t vl) { @@ -3470,8 +4420,13 @@ // CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 // CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 1 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP5]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL]], align 1 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg4e8ff_v_u8mf4_m( @@ -3485,8 
+4440,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP5]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL]], align 1 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg4e8ff_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, size_t *new_vl, size_t vl) { @@ -3506,8 +4466,13 @@ // CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 4 // CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 1 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP6]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL]], align 1 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg5e8ff_v_u8mf4_m( @@ -3523,8 +4488,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 -// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP6]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL]], align 1 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg5e8ff_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, size_t *new_vl, size_t vl) { @@ -3546,8 +4516,13 @@ // CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 5 // CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 -// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 1 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP7]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: 
newvl_store: +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL]], align 1 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg6e8ff_v_u8mf4_m( @@ -3565,8 +4540,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 -// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP7]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL]], align 1 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg6e8ff_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, size_t *new_vl, size_t vl) { @@ -3590,8 +4570,13 @@ // CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 6 // CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 -// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 1 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP8]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 +// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL]], align 1 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg7e8ff_v_u8mf4_m( @@ -3611,8 +4596,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 -// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP8]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL]], align 1 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg7e8ff_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, size_t *new_vl, size_t vl) { @@ -3638,8 +4628,13 @@ // 
CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 7 // CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 -// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 1 +// CHECK-RV32-NEXT: [[TMP9:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP9]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 +// CHECK-RV32-NEXT: store i32 [[TMP10]], i32* [[NEW_VL]], align 1 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg8e8ff_v_u8mf4_m( @@ -3661,8 +4656,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 -// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP9]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP10]], i64* [[NEW_VL]], align 1 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg8e8ff_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, size_t *new_vl, size_t vl) { @@ -3676,8 +4676,13 @@ // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 1 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL]], align 1 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg2e8ff_v_u8mf2_m( @@ -3687,8 +4692,13 @@ // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// 
CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 1 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg2e8ff_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, size_t *new_vl, size_t vl) { @@ -3704,8 +4714,13 @@ // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 // CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 1 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP4]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL]], align 1 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg3e8ff_v_u8mf2_m( @@ -3717,8 +4732,13 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP4]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL]], align 1 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg3e8ff_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, size_t *new_vl, size_t vl) { @@ -3736,8 +4756,13 @@ // CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 // CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 1 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP5]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL]], align 1 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg4e8ff_v_u8mf2_m( @@ -3751,8 +4776,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP5]], label [[NEWVL_STORE:%.*]], label 
[[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL]], align 1 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg4e8ff_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, size_t *new_vl, size_t vl) { @@ -3772,8 +4802,13 @@ // CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 4 // CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 1 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP6]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL]], align 1 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg5e8ff_v_u8mf2_m( @@ -3789,8 +4824,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 -// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP6]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL]], align 1 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg5e8ff_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, size_t *new_vl, size_t vl) { @@ -3812,8 +4852,13 @@ // CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 5 // CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 -// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 1 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP7]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL]], align 1 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg6e8ff_v_u8mf2_m( @@ -3831,8 +4876,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 // CHECK-RV64-NEXT: store 
[[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 -// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP7]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL]], align 1 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg6e8ff_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, size_t *new_vl, size_t vl) { @@ -3856,8 +4906,13 @@ // CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 6 // CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 -// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 1 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP8]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 +// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL]], align 1 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg7e8ff_v_u8mf2_m( @@ -3877,8 +4932,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 -// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP8]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL]], align 1 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg7e8ff_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, size_t *new_vl, size_t vl) { @@ -3904,8 +4964,13 @@ // CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 7 // CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 -// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 1 +// CHECK-RV32-NEXT: [[TMP9:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP9]], label [[NEWVL_STORE:%.*]], label 
[[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 +// CHECK-RV32-NEXT: store i32 [[TMP10]], i32* [[NEW_VL]], align 1 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg8e8ff_v_u8mf2_m( @@ -3927,8 +4992,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 -// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP9]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP10]], i64* [[NEW_VL]], align 1 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg8e8ff_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, size_t *new_vl, size_t vl) { @@ -3942,8 +5012,13 @@ // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 1 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL]], align 1 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg2e8ff_v_u8m1_m( @@ -3953,8 +5028,13 @@ // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 1 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg2e8ff_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, size_t *new_vl, size_t vl) { @@ -3970,8 +5050,13 @@ // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 // 
CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 1 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP4]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL]], align 1 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg3e8ff_v_u8m1_m( @@ -3983,8 +5068,13 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP4]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL]], align 1 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg3e8ff_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, size_t *new_vl, size_t vl) { @@ -4002,8 +5092,13 @@ // CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 // CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 1 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP5]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL]], align 1 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg4e8ff_v_u8m1_m( @@ -4017,8 +5112,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP5]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL]], align 1 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg4e8ff_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, size_t 
*new_vl, size_t vl) { @@ -4038,8 +5138,13 @@ // CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 4 // CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 1 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP6]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL]], align 1 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg5e8ff_v_u8m1_m( @@ -4055,8 +5160,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 -// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP6]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL]], align 1 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg5e8ff_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, size_t *new_vl, size_t vl) { @@ -4078,8 +5188,13 @@ // CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 5 // CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 -// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 1 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP7]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL]], align 1 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg6e8ff_v_u8m1_m( @@ -4097,8 +5212,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 -// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP7]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL]], align 1 +// CHECK-RV64-NEXT: br label 
[[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg6e8ff_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, size_t *new_vl, size_t vl) { @@ -4122,8 +5242,13 @@ // CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 6 // CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 -// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 1 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP8]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 +// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL]], align 1 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg7e8ff_v_u8m1_m( @@ -4143,8 +5268,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 -// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP8]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL]], align 1 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg7e8ff_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, size_t *new_vl, size_t vl) { @@ -4170,8 +5300,13 @@ // CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 7 // CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 -// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 1 +// CHECK-RV32-NEXT: [[TMP9:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP9]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 +// CHECK-RV32-NEXT: store i32 [[TMP10]], i32* [[NEW_VL]], align 1 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg8e8ff_v_u8m1_m( @@ -4193,8 +5328,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// 
CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 -// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP9]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP10]], i64* [[NEW_VL]], align 1 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg8e8ff_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, size_t *new_vl, size_t vl) { @@ -4208,8 +5348,13 @@ // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 1 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL]], align 1 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg2e8ff_v_u8m2_m( @@ -4219,8 +5364,13 @@ // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 1 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg2e8ff_v_u8m2_m (vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, size_t *new_vl, size_t vl) { @@ -4236,8 +5386,13 @@ // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 // CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 1 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP4]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL]], align 1 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // 
CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg3e8ff_v_u8m2_m( @@ -4249,8 +5404,13 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP4]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL]], align 1 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg3e8ff_v_u8m2_m (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, size_t *new_vl, size_t vl) { @@ -4268,8 +5428,13 @@ // CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 // CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 1 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP5]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL]], align 1 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg4e8ff_v_u8m2_m( @@ -4283,8 +5448,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP5]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL]], align 1 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg4e8ff_v_u8m2_m (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, size_t *new_vl, size_t vl) { @@ -4298,8 +5468,13 @@ // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 1 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP4:%.*]] = 
extractvalue { , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL]], align 1 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg2e8ff_v_u8m4_m( @@ -4309,8 +5484,13 @@ // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 1 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg2e8ff_v_u8m4_m (vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, vuint8m4_t maskedoff0, vuint8m4_t maskedoff1, const uint8_t *base, size_t *new_vl, size_t vl) { @@ -4324,8 +5504,13 @@ // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 2 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL]], align 2 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg2e16ff_v_u16mf4_m( @@ -4335,8 +5520,13 @@ // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg2e16ff_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, size_t *new_vl, size_t vl) { @@ -4352,8 +5542,13 @@ // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 // CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 2 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP4]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: 
newvl_store: +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL]], align 2 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg3e16ff_v_u16mf4_m( @@ -4365,8 +5560,13 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP4]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg3e16ff_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, size_t *new_vl, size_t vl) { @@ -4384,8 +5584,13 @@ // CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 // CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 2 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP5]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL]], align 2 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg4e16ff_v_u16mf4_m( @@ -4399,8 +5604,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP5]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg4e16ff_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, size_t *new_vl, size_t vl) { @@ -4420,8 +5630,13 @@ // CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 4 // CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 -// 
CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 2 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP6]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL]], align 2 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg5e16ff_v_u16mf4_m( @@ -4437,8 +5652,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 -// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP6]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg5e16ff_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, size_t *new_vl, size_t vl) { @@ -4460,8 +5680,13 @@ // CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 5 // CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 -// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 2 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP7]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL]], align 2 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg6e16ff_v_u16mf4_m( @@ -4479,8 +5704,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 -// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP7]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg6e16ff_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t 
maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, size_t *new_vl, size_t vl) { @@ -4504,8 +5734,13 @@ // CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 6 // CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 -// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 2 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP8]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 +// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL]], align 2 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg7e16ff_v_u16mf4_m( @@ -4525,8 +5760,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 -// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP8]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg7e16ff_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, size_t *new_vl, size_t vl) { @@ -4552,8 +5792,13 @@ // CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 7 // CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 -// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 2 +// CHECK-RV32-NEXT: [[TMP9:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP9]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 +// CHECK-RV32-NEXT: store i32 [[TMP10]], i32* [[NEW_VL]], align 2 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg8e16ff_v_u16mf4_m( @@ -4575,8 +5820,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 -// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// 
CHECK-RV64-NEXT: br i1 [[TMP9]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP10]], i64* [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg8e16ff_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, size_t *new_vl, size_t vl) { @@ -4590,8 +5840,13 @@ // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 2 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL]], align 2 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg2e16ff_v_u16mf2_m( @@ -4601,8 +5856,13 @@ // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg2e16ff_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, size_t *new_vl, size_t vl) { @@ -4618,8 +5878,13 @@ // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 // CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 2 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP4]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL]], align 2 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg3e16ff_v_u16mf2_m( @@ -4631,8 +5896,13 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // 
CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP4]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg3e16ff_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, size_t *new_vl, size_t vl) { @@ -4650,8 +5920,13 @@ // CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 // CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 2 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP5]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL]], align 2 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg4e16ff_v_u16mf2_m( @@ -4665,8 +5940,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP5]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg4e16ff_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, size_t *new_vl, size_t vl) { @@ -4686,8 +5966,13 @@ // CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 4 // CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 2 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP6]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL]], align 2 +// 
CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg5e16ff_v_u16mf2_m( @@ -4703,8 +5988,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 -// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP6]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg5e16ff_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, size_t *new_vl, size_t vl) { @@ -4726,8 +6016,13 @@ // CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 5 // CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 -// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 2 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP7]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL]], align 2 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg6e16ff_v_u16mf2_m( @@ -4745,8 +6040,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 -// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP7]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg6e16ff_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, size_t *new_vl, size_t vl) { @@ -4770,8 +6070,13 @@ // CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 6 // CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = 
extractvalue { , , , , , , , i32 } [[TMP0]], 7 -// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 2 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP8]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 +// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL]], align 2 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg7e16ff_v_u16mf2_m( @@ -4791,8 +6096,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 -// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP8]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg7e16ff_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, size_t *new_vl, size_t vl) { @@ -4818,8 +6128,13 @@ // CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 7 // CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 -// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 2 +// CHECK-RV32-NEXT: [[TMP9:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP9]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 +// CHECK-RV32-NEXT: store i32 [[TMP10]], i32* [[NEW_VL]], align 2 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg8e16ff_v_u16mf2_m( @@ -4841,8 +6156,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 -// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP9]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP10]], i64* [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg8e16ff_v_u16mf2_m 
(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, size_t *new_vl, size_t vl) { @@ -4856,8 +6176,13 @@ // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 2 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL]], align 2 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg2e16ff_v_u16m1_m( @@ -4867,8 +6192,13 @@ // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg2e16ff_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, size_t *new_vl, size_t vl) { @@ -4884,8 +6214,13 @@ // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 // CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 2 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP4]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL]], align 2 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg3e16ff_v_u16m1_m( @@ -4897,8 +6232,13 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP4]], label [[NEWVL_STORE:%.*]], label 
[[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg3e16ff_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, size_t *new_vl, size_t vl) { @@ -4916,8 +6256,13 @@ // CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 // CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 2 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP5]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL]], align 2 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg4e16ff_v_u16m1_m( @@ -4931,8 +6276,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP5]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg4e16ff_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, size_t *new_vl, size_t vl) { @@ -4952,8 +6302,13 @@ // CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 4 // CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 2 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP6]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL]], align 2 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg5e16ff_v_u16m1_m( @@ -4969,8 +6324,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } 
[[TMP0]], 5 -// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP6]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg5e16ff_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, size_t *new_vl, size_t vl) { @@ -4992,8 +6352,13 @@ // CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 5 // CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 -// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 2 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP7]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL]], align 2 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg6e16ff_v_u16m1_m( @@ -5011,8 +6376,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 -// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP7]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg6e16ff_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, size_t *new_vl, size_t vl) { @@ -5036,8 +6406,13 @@ // CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 6 // CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 -// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 2 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP8]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 +// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL]], align 2 +// 
CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg7e16ff_v_u16m1_m( @@ -5057,8 +6432,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 -// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP8]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg7e16ff_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, size_t *new_vl, size_t vl) { @@ -5084,8 +6464,13 @@ // CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 7 // CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 -// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 2 +// CHECK-RV32-NEXT: [[TMP9:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP9]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 +// CHECK-RV32-NEXT: store i32 [[TMP10]], i32* [[NEW_VL]], align 2 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg8e16ff_v_u16m1_m( @@ -5107,8 +6492,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 -// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP9]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP10]], i64* [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg8e16ff_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, size_t *new_vl, size_t vl) { @@ -5122,8 +6512,13 @@ // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // 
CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 2 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL]], align 2 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg2e16ff_v_u16m2_m( @@ -5133,8 +6528,13 @@ // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg2e16ff_v_u16m2_m (vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, size_t *new_vl, size_t vl) { @@ -5150,8 +6550,13 @@ // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 // CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 2 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP4]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL]], align 2 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg3e16ff_v_u16m2_m( @@ -5163,8 +6568,13 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP4]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg3e16ff_v_u16m2_m (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, size_t *new_vl, 
size_t vl) { @@ -5182,8 +6592,13 @@ // CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 // CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 2 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP5]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL]], align 2 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg4e16ff_v_u16m2_m( @@ -5197,8 +6612,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP5]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg4e16ff_v_u16m2_m (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, size_t *new_vl, size_t vl) { @@ -5212,8 +6632,13 @@ // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 2 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL]], align 2 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg2e16ff_v_u16m4_m( @@ -5223,8 +6648,13 @@ // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void 
test_vlseg2e16ff_v_u16m4_m (vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, size_t *new_vl, size_t vl) { @@ -5238,8 +6668,13 @@ // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL]], align 4 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg2e32ff_v_u32mf2_m( @@ -5249,8 +6684,13 @@ // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 4 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg2e32ff_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, size_t *new_vl, size_t vl) { @@ -5266,8 +6706,13 @@ // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 // CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP4]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL]], align 4 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg3e32ff_v_u32mf2_m( @@ -5279,8 +6724,13 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP4]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL]], align 4 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// 
CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg3e32ff_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, size_t *new_vl, size_t vl) { @@ -5298,8 +6748,13 @@ // CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 // CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP5]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL]], align 4 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg4e32ff_v_u32mf2_m( @@ -5313,8 +6768,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP5]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL]], align 4 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg4e32ff_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, size_t *new_vl, size_t vl) { @@ -5334,8 +6794,13 @@ // CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 4 // CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP6]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL]], align 4 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg5e32ff_v_u32mf2_m( @@ -5351,8 +6816,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 -// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP6]], label [[NEWVL_STORE:%.*]], 
label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL]], align 4 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg5e32ff_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, size_t *new_vl, size_t vl) { @@ -5374,8 +6844,13 @@ // CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 5 // CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 -// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP7]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL]], align 4 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg6e32ff_v_u32mf2_m( @@ -5393,8 +6868,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 -// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP7]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL]], align 4 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg6e32ff_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, size_t *new_vl, size_t vl) { @@ -5418,8 +6898,13 @@ // CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 6 // CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 -// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP8]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 +// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL]], align 4 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg7e32ff_v_u32mf2_m( @@ -5439,8 +6924,13 @@ // CHECK-RV64-NEXT: store 
[[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 -// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP8]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL]], align 4 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg7e32ff_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, size_t *new_vl, size_t vl) { @@ -5466,8 +6956,13 @@ // CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 7 // CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 -// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP9:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP9]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 +// CHECK-RV32-NEXT: store i32 [[TMP10]], i32* [[NEW_VL]], align 4 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg8e32ff_v_u32mf2_m( @@ -5489,8 +6984,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 -// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP9]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP10]], i64* [[NEW_VL]], align 4 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg8e32ff_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, size_t *new_vl, size_t vl) { @@ -5504,8 +7004,13 @@ // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = 
extractvalue { , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL]], align 4 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg2e32ff_v_u32m1_m( @@ -5515,8 +7020,13 @@ // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 4 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg2e32ff_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, size_t *new_vl, size_t vl) { @@ -5532,8 +7042,13 @@ // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 // CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP4]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL]], align 4 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg3e32ff_v_u32m1_m( @@ -5545,8 +7060,13 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP4]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL]], align 4 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg3e32ff_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, size_t *new_vl, size_t vl) { @@ -5564,8 +7084,13 @@ // CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } 
[[TMP0]], 3 // CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP5]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL]], align 4 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg4e32ff_v_u32m1_m( @@ -5579,8 +7104,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP5]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL]], align 4 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg4e32ff_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, size_t *new_vl, size_t vl) { @@ -5600,8 +7130,13 @@ // CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 4 // CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP6]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL]], align 4 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg5e32ff_v_u32m1_m( @@ -5617,8 +7152,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 -// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP6]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL]], align 4 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg5e32ff_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, 
vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, size_t *new_vl, size_t vl) { @@ -5640,8 +7180,13 @@ // CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 5 // CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 -// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP7]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL]], align 4 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg6e32ff_v_u32m1_m( @@ -5659,8 +7204,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 -// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP7]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL]], align 4 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg6e32ff_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, size_t *new_vl, size_t vl) { @@ -5684,8 +7234,13 @@ // CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 6 // CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 -// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP8]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 +// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL]], align 4 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg7e32ff_v_u32m1_m( @@ -5705,8 +7260,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 -// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP8]], label [[NEWVL_STORE:%.*]], 
label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL]], align 4 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg7e32ff_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, size_t *new_vl, size_t vl) { @@ -5732,8 +7292,13 @@ // CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 7 // CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 -// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP9:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP9]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 +// CHECK-RV32-NEXT: store i32 [[TMP10]], i32* [[NEW_VL]], align 4 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg8e32ff_v_u32m1_m( @@ -5755,8 +7320,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 -// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP9]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP10]], i64* [[NEW_VL]], align 4 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg8e32ff_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, size_t *new_vl, size_t vl) { @@ -5770,8 +7340,13 @@ // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL]], align 4 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: 
newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg2e32ff_v_u32m2_m( @@ -5781,8 +7356,13 @@ // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 4 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg2e32ff_v_u32m2_m (vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, size_t *new_vl, size_t vl) { @@ -5798,8 +7378,13 @@ // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 // CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP4]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL]], align 4 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg3e32ff_v_u32m2_m( @@ -5811,8 +7396,13 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP4]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL]], align 4 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg3e32ff_v_u32m2_m (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, size_t *new_vl, size_t vl) { @@ -5830,8 +7420,13 @@ // CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 // CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP5]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 +// 
CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL]], align 4 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg4e32ff_v_u32m2_m( @@ -5845,8 +7440,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP5]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL]], align 4 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg4e32ff_v_u32m2_m (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, size_t *new_vl, size_t vl) { @@ -5860,8 +7460,13 @@ // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL]], align 4 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg2e32ff_v_u32m4_m( @@ -5871,8 +7476,13 @@ // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 4 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg2e32ff_v_u32m4_m (vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, size_t *new_vl, size_t vl) { @@ -5886,8 +7496,13 @@ // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 8 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP3]], label 
[[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL]], align 8 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg2e64ff_v_u64m1_m( @@ -5897,8 +7512,13 @@ // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg2e64ff_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, size_t *new_vl, size_t vl) { @@ -5914,8 +7534,13 @@ // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 // CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 8 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP4]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL]], align 8 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg3e64ff_v_u64m1_m( @@ -5927,8 +7552,13 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP4]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg3e64ff_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, size_t *new_vl, size_t vl) { @@ -5946,8 +7576,13 @@ // CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 // CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 8 +// 
CHECK-RV32-NEXT: [[TMP5:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP5]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL]], align 8 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg4e64ff_v_u64m1_m( @@ -5961,8 +7596,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP5]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg4e64ff_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, size_t *new_vl, size_t vl) { @@ -5982,8 +7622,13 @@ // CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 4 // CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 8 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP6]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL]], align 8 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg5e64ff_v_u64m1_m( @@ -5999,8 +7644,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 -// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP6]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg5e64ff_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, size_t *new_vl, size_t vl) { @@ -6022,8 +7672,13 @@ // CHECK-RV32-NEXT: store [[TMP5]], 
* [[V4:%.*]], align 8 // CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 5 // CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 -// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 8 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP7]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL]], align 8 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg6e64ff_v_u64m1_m( @@ -6041,8 +7696,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 -// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP7]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg6e64ff_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, size_t *new_vl, size_t vl) { @@ -6066,8 +7726,13 @@ // CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 // CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 6 // CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 -// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 8 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP8]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 +// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL]], align 8 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg7e64ff_v_u64m1_m( @@ -6087,8 +7752,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 -// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP8]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label 
[[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg7e64ff_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, size_t *new_vl, size_t vl) { @@ -6114,8 +7784,13 @@ // CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 // CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 7 // CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 -// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 8 +// CHECK-RV32-NEXT: [[TMP9:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP9]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 +// CHECK-RV32-NEXT: store i32 [[TMP10]], i32* [[NEW_VL]], align 8 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg8e64ff_v_u64m1_m( @@ -6137,8 +7812,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 -// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP9]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP10]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg8e64ff_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, size_t *new_vl, size_t vl) { @@ -6152,8 +7832,13 @@ // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 8 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL]], align 8 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg2e64ff_v_u64m2_m( @@ -6163,8 +7848,13 @@ // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } 
[[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg2e64ff_v_u64m2_m (vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, size_t *new_vl, size_t vl) { @@ -6180,8 +7870,13 @@ // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 // CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 8 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP4]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL]], align 8 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg3e64ff_v_u64m2_m( @@ -6193,8 +7888,13 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP4]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg3e64ff_v_u64m2_m (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, size_t *new_vl, size_t vl) { @@ -6212,8 +7912,13 @@ // CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 // CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 8 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP5]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL]], align 8 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg4e64ff_v_u64m2_m( @@ -6227,8 +7932,13 @@ // 
CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP5]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg4e64ff_v_u64m2_m (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, size_t *new_vl, size_t vl) { @@ -6242,8 +7952,13 @@ // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 8 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL]], align 8 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg2e64ff_v_u64m4_m( @@ -6253,8 +7968,13 @@ // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg2e64ff_v_u64m4_m (vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, size_t *new_vl, size_t vl) { @@ -6268,8 +7988,13 @@ // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL]], align 4 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] 
+// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg2e32ff_v_f32mf2_m( @@ -6279,8 +8004,13 @@ // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 4 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg2e32ff_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, size_t *new_vl, size_t vl) { @@ -6296,8 +8026,13 @@ // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 // CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP4]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL]], align 4 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg3e32ff_v_f32mf2_m( @@ -6309,8 +8044,13 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP4]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL]], align 4 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg3e32ff_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, size_t *new_vl, size_t vl) { @@ -6328,8 +8068,13 @@ // CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 // CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP5]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , i32 
} [[TMP0]], 4 +// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL]], align 4 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg4e32ff_v_f32mf2_m( @@ -6343,8 +8088,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP5]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL]], align 4 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg4e32ff_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, size_t *new_vl, size_t vl) { @@ -6364,8 +8114,13 @@ // CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 4 // CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP6]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL]], align 4 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg5e32ff_v_f32mf2_m( @@ -6381,8 +8136,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 -// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP6]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL]], align 4 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg5e32ff_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, size_t *new_vl, size_t vl) { @@ -6404,8 +8164,13 @@ // CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 5 // CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , 
, , , , , i32 } [[TMP0]], 6 -// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP7]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL]], align 4 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg6e32ff_v_f32mf2_m( @@ -6423,8 +8188,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 -// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP7]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL]], align 4 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg6e32ff_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, size_t *new_vl, size_t vl) { @@ -6448,8 +8218,13 @@ // CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 6 // CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 -// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP8]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 +// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL]], align 4 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg7e32ff_v_f32mf2_m( @@ -6469,8 +8244,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 -// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP8]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL]], align 4 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg7e32ff_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, 
vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, size_t *new_vl, size_t vl) { @@ -6496,8 +8276,13 @@ // CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 7 // CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 -// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP9:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP9]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 +// CHECK-RV32-NEXT: store i32 [[TMP10]], i32* [[NEW_VL]], align 4 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg8e32ff_v_f32mf2_m( @@ -6519,8 +8304,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 -// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP9]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP10]], i64* [[NEW_VL]], align 4 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg8e32ff_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, size_t *new_vl, size_t vl) { @@ -6534,8 +8324,13 @@ // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL]], align 4 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg2e32ff_v_f32m1_m( @@ -6545,8 +8340,13 @@ // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , 
i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 4 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg2e32ff_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, size_t *new_vl, size_t vl) { @@ -6562,8 +8362,13 @@ // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 // CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP4]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL]], align 4 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg3e32ff_v_f32m1_m( @@ -6575,8 +8380,13 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP4]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL]], align 4 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg3e32ff_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, size_t *new_vl, size_t vl) { @@ -6594,8 +8404,13 @@ // CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 // CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP5]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL]], align 4 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg4e32ff_v_f32m1_m( @@ -6609,8 +8424,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 
3 // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP5]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL]], align 4 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg4e32ff_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, size_t *new_vl, size_t vl) { @@ -6630,8 +8450,13 @@ // CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 4 // CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP6]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL]], align 4 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg5e32ff_v_f32m1_m( @@ -6647,8 +8472,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 -// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP6]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL]], align 4 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg5e32ff_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, size_t *new_vl, size_t vl) { @@ -6670,8 +8500,13 @@ // CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 5 // CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 -// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP7]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store i32 [[TMP8]], 
i32* [[NEW_VL]], align 4 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg6e32ff_v_f32m1_m( @@ -6689,8 +8524,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 -// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP7]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL]], align 4 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg6e32ff_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, size_t *new_vl, size_t vl) { @@ -6714,8 +8554,13 @@ // CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 6 // CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 -// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP8]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 +// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL]], align 4 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg7e32ff_v_f32m1_m( @@ -6735,8 +8580,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 -// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP8]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL]], align 4 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg7e32ff_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, size_t *new_vl, size_t vl) { @@ -6762,8 +8612,13 @@ // CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , 
, , , i32 } [[TMP0]], 7 // CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 -// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP9:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP9]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 +// CHECK-RV32-NEXT: store i32 [[TMP10]], i32* [[NEW_VL]], align 4 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg8e32ff_v_f32m1_m( @@ -6785,8 +8640,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 -// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP9]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP10]], i64* [[NEW_VL]], align 4 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg8e32ff_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, size_t *new_vl, size_t vl) { @@ -6800,8 +8660,13 @@ // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL]], align 4 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg2e32ff_v_f32m2_m( @@ -6811,8 +8676,13 @@ // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 4 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// 
CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg2e32ff_v_f32m2_m (vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, size_t *new_vl, size_t vl) { @@ -6828,8 +8698,13 @@ // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 // CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP4]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL]], align 4 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg3e32ff_v_f32m2_m( @@ -6841,8 +8716,13 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP4]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL]], align 4 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg3e32ff_v_f32m2_m (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, size_t *new_vl, size_t vl) { @@ -6860,8 +8740,13 @@ // CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 // CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP5]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL]], align 4 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg4e32ff_v_f32m2_m( @@ -6875,8 +8760,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP5]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , i64 } 
[[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL]], align 4 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg4e32ff_v_f32m2_m (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, size_t *new_vl, size_t vl) { @@ -6890,8 +8780,13 @@ // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL]], align 4 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg2e32ff_v_f32m4_m( @@ -6901,8 +8796,13 @@ // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 4 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg2e32ff_v_f32m4_m (vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, size_t *new_vl, size_t vl) { @@ -6916,8 +8816,13 @@ // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 8 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL]], align 8 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg2e64ff_v_f64m1_m( @@ -6927,8 +8832,13 @@ // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label 
[[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg2e64ff_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, size_t *new_vl, size_t vl) { @@ -6944,8 +8854,13 @@ // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 // CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 8 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP4]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL]], align 8 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg3e64ff_v_f64m1_m( @@ -6957,8 +8872,13 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP4]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg3e64ff_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, size_t *new_vl, size_t vl) { @@ -6976,8 +8896,13 @@ // CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 // CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 8 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP5]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL]], align 8 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg4e64ff_v_f64m1_m( @@ -6991,8 +8916,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], 
align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP5]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg4e64ff_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, size_t *new_vl, size_t vl) { @@ -7012,8 +8942,13 @@ // CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 4 // CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 8 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP6]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL]], align 8 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg5e64ff_v_f64m1_m( @@ -7029,8 +8964,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 -// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP6]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg5e64ff_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, size_t *new_vl, size_t vl) { @@ -7052,8 +8992,13 @@ // CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 // CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 5 // CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 -// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 8 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP7]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL]], align 8 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg6e64ff_v_f64m1_m( @@ -7071,8 +9016,13 
@@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 -// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP7]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg6e64ff_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, size_t *new_vl, size_t vl) { @@ -7096,8 +9046,13 @@ // CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 // CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 6 // CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 -// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 8 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP8]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 +// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL]], align 8 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg7e64ff_v_f64m1_m( @@ -7117,8 +9072,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 -// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP8]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg7e64ff_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, size_t *new_vl, size_t vl) { @@ -7144,8 +9104,13 @@ // CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 // CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 7 // CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 -// CHECK-RV32-NEXT: store 
i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 8 +// CHECK-RV32-NEXT: [[TMP9:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP9]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 +// CHECK-RV32-NEXT: store i32 [[TMP10]], i32* [[NEW_VL]], align 8 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg8e64ff_v_f64m1_m( @@ -7167,8 +9132,13 @@ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 -// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP9]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP10]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg8e64ff_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, size_t *new_vl, size_t vl) { @@ -7182,8 +9152,13 @@ // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 8 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL]], align 8 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg2e64ff_v_f64m2_m( @@ -7193,8 +9168,13 @@ // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg2e64ff_v_f64m2_m (vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, 
const double *base, size_t *new_vl, size_t vl) { @@ -7210,8 +9190,13 @@ // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 // CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 8 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP4]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL]], align 8 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg3e64ff_v_f64m2_m( @@ -7223,8 +9208,13 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP4]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg3e64ff_v_f64m2_m (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, size_t *new_vl, size_t vl) { @@ -7242,8 +9232,13 @@ // CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 // CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 8 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP5]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL]], align 8 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg4e64ff_v_f64m2_m( @@ -7257,8 +9252,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP5]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void 
test_vlseg4e64ff_v_f64m2_m (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, size_t *new_vl, size_t vl) { @@ -7272,8 +9272,13 @@ // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 8 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL]], align 8 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg2e64ff_v_f64m4_m( @@ -7283,8 +9288,13 @@ // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg2e64ff_v_f64m4_m (vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, size_t *new_vl, size_t vl) { @@ -7298,8 +9308,13 @@ // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 2 +// CHECK-RV32-NEXT: [[TMP3:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 +// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL]], align 2 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg2e16ff_v_f16mf4_m( @@ -7309,8 +9324,13 @@ // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP4]], 
i64* [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg2e16ff_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, size_t *new_vl, size_t vl) { @@ -7326,8 +9346,13 @@ // CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 // CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 2 +// CHECK-RV32-NEXT: [[TMP4:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP4]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 +// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL]], align 2 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg3e16ff_v_f16mf4_m( @@ -7339,8 +9364,13 @@ // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 -// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP4]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg3e16ff_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, size_t *new_vl, size_t vl) { @@ -7358,8 +9388,13 @@ // CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 // CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 2 +// CHECK-RV32-NEXT: [[TMP5:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP5]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 +// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL]], align 2 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg4e16ff_v_f16mf4_m( @@ -7373,8 +9408,13 @@ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 -// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP5]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// 
CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg4e16ff_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, size_t *new_vl, size_t vl) { @@ -7394,8 +9434,13 @@ // CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 4 // CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 2 +// CHECK-RV32-NEXT: [[TMP6:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP6]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 +// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL]], align 2 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg5e16ff_v_f16mf4_m( @@ -7411,8 +9456,13 @@ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 -// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP6]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg5e16ff_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, size_t *new_vl, size_t vl) { @@ -7434,8 +9484,13 @@ // CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 5 // CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 -// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 2 +// CHECK-RV32-NEXT: [[TMP7:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP7]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 +// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL]], align 2 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg6e16ff_v_f16mf4_m( @@ -7453,8 +9508,13 @@ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 // 
CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 -// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP7]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg6e16ff_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, size_t *new_vl, size_t vl) { @@ -7478,8 +9538,13 @@ // CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 6 // CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 -// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 2 +// CHECK-RV32-NEXT: [[TMP8:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null +// CHECK-RV32-NEXT: br i1 [[TMP8]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV32: newvl_store: +// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 +// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL]], align 2 +// CHECK-RV32-NEXT: br label [[NEWVL_END]] +// CHECK-RV32: newvl_end: // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vlseg7e16ff_v_f16mf4_m( @@ -7499,8 +9564,13 @@ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 -// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP8]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL]], align 2 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret void // void test_vlseg7e16ff_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, size_t *new_vl, size_t vl) { @@ -7526,8 +9596,13 @@ // CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 7 // CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 -// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 2 +// CHECK-RV32-NEXT: [[TMP9:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null 
+// CHECK-RV32-NEXT: br i1 [[TMP9]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]]
+// CHECK-RV32: newvl_store:
+// CHECK-RV32-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8
+// CHECK-RV32-NEXT: store i32 [[TMP10]], i32* [[NEW_VL]], align 2
+// CHECK-RV32-NEXT: br label [[NEWVL_END]]
+// CHECK-RV32: newvl_end:
// CHECK-RV32-NEXT: ret void
//
// CHECK-RV64-LABEL: @test_vlseg8e16ff_v_f16mf4_m(
@@ -7549,8 +9624,13 @@
// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7
// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8
-// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null
+// CHECK-RV64-NEXT: br i1 [[TMP9]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]]
+// CHECK-RV64: newvl_store:
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8
+// CHECK-RV64-NEXT: store i64 [[TMP10]], i64* [[NEW_VL]], align 2
+// CHECK-RV64-NEXT: br label [[NEWVL_END]]
+// CHECK-RV64: newvl_end:
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e16ff_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, size_t *new_vl, size_t vl) {
@@ -7564,8 +9644,13 @@
// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1
// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 2
+// CHECK-RV32-NEXT: [[TMP3:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null
+// CHECK-RV32-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]]
+// CHECK-RV32: newvl_store:
+// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , i32 } [[TMP0]], 2
+// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL]], align 2
+// CHECK-RV32-NEXT: br label [[NEWVL_END]]
+// CHECK-RV32: newvl_end:
// CHECK-RV32-NEXT: ret void
//
// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_f16mf2_m(
@@ -7575,8 +9660,13 @@
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1
// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null
+// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]]
+// CHECK-RV64: newvl_store:
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 2
+// CHECK-RV64-NEXT: br label [[NEWVL_END]]
+// CHECK-RV64: newvl_end:
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e16ff_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, size_t *new_vl, size_t vl) {
@@ -7592,8 +9682,13 @@
// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2
// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3
-// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 2
+// CHECK-RV32-NEXT: [[TMP4:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null
+// CHECK-RV32-NEXT: br i1 [[TMP4]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]]
+// CHECK-RV32: newvl_store:
+// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3
+// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL]], align 2
+// CHECK-RV32-NEXT: br label [[NEWVL_END]]
+// CHECK-RV32: newvl_end:
// CHECK-RV32-NEXT: ret void
//
// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_f16mf2_m(
@@ -7605,8 +9700,13 @@
// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2
// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3
-// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null
+// CHECK-RV64-NEXT: br i1 [[TMP4]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]]
+// CHECK-RV64: newvl_store:
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL]], align 2
+// CHECK-RV64-NEXT: br label [[NEWVL_END]]
+// CHECK-RV64: newvl_end:
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e16ff_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, size_t *new_vl, size_t vl) {
@@ -7624,8 +9724,13 @@
// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3
// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4
-// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 2
+// CHECK-RV32-NEXT: [[TMP5:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null
+// CHECK-RV32-NEXT: br i1 [[TMP5]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]]
+// CHECK-RV32: newvl_store:
+// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4
+// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL]], align 2
+// CHECK-RV32-NEXT: br label [[NEWVL_END]]
+// CHECK-RV32: newvl_end:
// CHECK-RV32-NEXT: ret void
//
// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_f16mf2_m(
@@ -7639,8 +9744,13 @@
// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3
// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4
-// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null
+// CHECK-RV64-NEXT: br i1 [[TMP5]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]]
+// CHECK-RV64: newvl_store:
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL]], align 2
+// CHECK-RV64-NEXT: br label [[NEWVL_END]]
+// CHECK-RV64: newvl_end:
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e16ff_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, size_t *new_vl, size_t vl) {
@@ -7660,8 +9770,13 @@
// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 4
// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5
-// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 2
+// CHECK-RV32-NEXT: [[TMP6:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null
+// CHECK-RV32-NEXT: br i1 [[TMP6]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]]
+// CHECK-RV32: newvl_store:
+// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5
+// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL]], align 2
+// CHECK-RV32-NEXT: br label [[NEWVL_END]]
+// CHECK-RV32: newvl_end:
// CHECK-RV32-NEXT: ret void
//
// CHECK-RV64-LABEL: @test_vlseg5e16ff_v_f16mf2_m(
@@ -7677,8 +9792,13 @@
// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4
// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5
-// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null
+// CHECK-RV64-NEXT: br i1 [[TMP6]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]]
+// CHECK-RV64: newvl_store:
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5
+// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL]], align 2
+// CHECK-RV64-NEXT: br label [[NEWVL_END]]
+// CHECK-RV64: newvl_end:
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e16ff_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, size_t *new_vl, size_t vl) {
@@ -7700,8 +9820,13 @@
// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 5
// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6
-// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 2
+// CHECK-RV32-NEXT: [[TMP7:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null
+// CHECK-RV32-NEXT: br i1 [[TMP7]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]]
+// CHECK-RV32: newvl_store:
+// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6
+// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL]], align 2
+// CHECK-RV32-NEXT: br label [[NEWVL_END]]
+// CHECK-RV32: newvl_end:
// CHECK-RV32-NEXT: ret void
//
// CHECK-RV64-LABEL: @test_vlseg6e16ff_v_f16mf2_m(
@@ -7719,8 +9844,13 @@
// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5
// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6
-// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null
+// CHECK-RV64-NEXT: br i1 [[TMP7]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]]
+// CHECK-RV64: newvl_store:
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6
+// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL]], align 2
+// CHECK-RV64-NEXT: br label [[NEWVL_END]]
+// CHECK-RV64: newvl_end:
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e16ff_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, size_t *new_vl, size_t vl) {
@@ -7744,8 +9874,13 @@
// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 6
// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7
-// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 2
+// CHECK-RV32-NEXT: [[TMP8:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null
+// CHECK-RV32-NEXT: br i1 [[TMP8]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]]
+// CHECK-RV32: newvl_store:
+// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7
+// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL]], align 2
+// CHECK-RV32-NEXT: br label [[NEWVL_END]]
+// CHECK-RV32: newvl_end:
// CHECK-RV32-NEXT: ret void
//
// CHECK-RV64-LABEL: @test_vlseg7e16ff_v_f16mf2_m(
@@ -7765,8 +9900,13 @@
// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6
// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7
-// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null
+// CHECK-RV64-NEXT: br i1 [[TMP8]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]]
+// CHECK-RV64: newvl_store:
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7
+// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL]], align 2
+// CHECK-RV64-NEXT: br label [[NEWVL_END]]
+// CHECK-RV64: newvl_end:
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e16ff_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, size_t *new_vl, size_t vl) {
@@ -7792,8 +9932,13 @@
// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 7
// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8
-// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 2
+// CHECK-RV32-NEXT: [[TMP9:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null
+// CHECK-RV32-NEXT: br i1 [[TMP9]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]]
+// CHECK-RV32: newvl_store:
+// CHECK-RV32-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8
+// CHECK-RV32-NEXT: store i32 [[TMP10]], i32* [[NEW_VL]], align 2
+// CHECK-RV32-NEXT: br label [[NEWVL_END]]
+// CHECK-RV32: newvl_end:
// CHECK-RV32-NEXT: ret void
//
// CHECK-RV64-LABEL: @test_vlseg8e16ff_v_f16mf2_m(
@@ -7815,8 +9960,13 @@
// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7
// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8
-// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null
+// CHECK-RV64-NEXT: br i1 [[TMP9]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]]
+// CHECK-RV64: newvl_store:
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8
+// CHECK-RV64-NEXT: store i64 [[TMP10]], i64* [[NEW_VL]], align 2
+// CHECK-RV64-NEXT: br label [[NEWVL_END]]
+// CHECK-RV64: newvl_end:
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e16ff_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, size_t *new_vl, size_t vl) {
@@ -7830,8 +9980,13 @@
// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1
// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 2
+// CHECK-RV32-NEXT: [[TMP3:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null
+// CHECK-RV32-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]]
+// CHECK-RV32: newvl_store:
+// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , i32 } [[TMP0]], 2
+// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL]], align 2
+// CHECK-RV32-NEXT: br label [[NEWVL_END]]
+// CHECK-RV32: newvl_end:
// CHECK-RV32-NEXT: ret void
//
// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_f16m1_m(
@@ -7841,8 +9996,13 @@
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1
// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null
+// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]]
+// CHECK-RV64: newvl_store:
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 2
+// CHECK-RV64-NEXT: br label [[NEWVL_END]]
+// CHECK-RV64: newvl_end:
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e16ff_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, size_t *new_vl, size_t vl) {
@@ -7858,8 +10018,13 @@
// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2
// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3
-// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 2
+// CHECK-RV32-NEXT: [[TMP4:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null
+// CHECK-RV32-NEXT: br i1 [[TMP4]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]]
+// CHECK-RV32: newvl_store:
+// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3
+// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL]], align 2
+// CHECK-RV32-NEXT: br label [[NEWVL_END]]
+// CHECK-RV32: newvl_end:
// CHECK-RV32-NEXT: ret void
//
// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_f16m1_m(
@@ -7871,8 +10036,13 @@
// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2
// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3
-// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null
+// CHECK-RV64-NEXT: br i1 [[TMP4]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]]
+// CHECK-RV64: newvl_store:
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL]], align 2
+// CHECK-RV64-NEXT: br label [[NEWVL_END]]
+// CHECK-RV64: newvl_end:
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e16ff_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, size_t *new_vl, size_t vl) {
@@ -7890,8 +10060,13 @@
// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3
// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4
-// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 2
+// CHECK-RV32-NEXT: [[TMP5:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null
+// CHECK-RV32-NEXT: br i1 [[TMP5]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]]
+// CHECK-RV32: newvl_store:
+// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4
+// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL]], align 2
+// CHECK-RV32-NEXT: br label [[NEWVL_END]]
+// CHECK-RV32: newvl_end:
// CHECK-RV32-NEXT: ret void
//
// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_f16m1_m(
@@ -7905,8 +10080,13 @@
// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3
// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4
-// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null
+// CHECK-RV64-NEXT: br i1 [[TMP5]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]]
+// CHECK-RV64: newvl_store:
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL]], align 2
+// CHECK-RV64-NEXT: br label [[NEWVL_END]]
+// CHECK-RV64: newvl_end:
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e16ff_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, size_t *new_vl, size_t vl) {
@@ -7926,8 +10106,13 @@
// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 4
// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5
-// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 2
+// CHECK-RV32-NEXT: [[TMP6:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null
+// CHECK-RV32-NEXT: br i1 [[TMP6]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]]
+// CHECK-RV32: newvl_store:
+// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5
+// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL]], align 2
+// CHECK-RV32-NEXT: br label [[NEWVL_END]]
+// CHECK-RV32: newvl_end:
// CHECK-RV32-NEXT: ret void
//
// CHECK-RV64-LABEL: @test_vlseg5e16ff_v_f16m1_m(
@@ -7943,8 +10128,13 @@
// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4
// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5
-// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null
+// CHECK-RV64-NEXT: br i1 [[TMP6]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]]
+// CHECK-RV64: newvl_store:
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5
+// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL]], align 2
+// CHECK-RV64-NEXT: br label [[NEWVL_END]]
+// CHECK-RV64: newvl_end:
// CHECK-RV64-NEXT: ret void
//
void test_vlseg5e16ff_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, size_t *new_vl, size_t vl) {
@@ -7966,8 +10156,13 @@
// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 5
// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6
-// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 2
+// CHECK-RV32-NEXT: [[TMP7:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null
+// CHECK-RV32-NEXT: br i1 [[TMP7]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]]
+// CHECK-RV32: newvl_store:
+// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6
+// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL]], align 2
+// CHECK-RV32-NEXT: br label [[NEWVL_END]]
+// CHECK-RV32: newvl_end:
// CHECK-RV32-NEXT: ret void
//
// CHECK-RV64-LABEL: @test_vlseg6e16ff_v_f16m1_m(
@@ -7985,8 +10180,13 @@
// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5
// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6
-// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null
+// CHECK-RV64-NEXT: br i1 [[TMP7]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]]
+// CHECK-RV64: newvl_store:
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6
+// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL]], align 2
+// CHECK-RV64-NEXT: br label [[NEWVL_END]]
+// CHECK-RV64: newvl_end:
// CHECK-RV64-NEXT: ret void
//
void test_vlseg6e16ff_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, size_t *new_vl, size_t vl) {
@@ -8010,8 +10210,13 @@
// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 6
// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7
-// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 2
+// CHECK-RV32-NEXT: [[TMP8:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null
+// CHECK-RV32-NEXT: br i1 [[TMP8]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]]
+// CHECK-RV32: newvl_store:
+// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7
+// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL]], align 2
+// CHECK-RV32-NEXT: br label [[NEWVL_END]]
+// CHECK-RV32: newvl_end:
// CHECK-RV32-NEXT: ret void
//
// CHECK-RV64-LABEL: @test_vlseg7e16ff_v_f16m1_m(
@@ -8031,8 +10236,13 @@
// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6
// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7
-// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null
+// CHECK-RV64-NEXT: br i1 [[TMP8]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]]
+// CHECK-RV64: newvl_store:
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7
+// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL]], align 2
+// CHECK-RV64-NEXT: br label [[NEWVL_END]]
+// CHECK-RV64: newvl_end:
// CHECK-RV64-NEXT: ret void
//
void test_vlseg7e16ff_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, size_t *new_vl, size_t vl) {
@@ -8058,8 +10268,13 @@
// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 7
// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8
-// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 2
+// CHECK-RV32-NEXT: [[TMP9:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null
+// CHECK-RV32-NEXT: br i1 [[TMP9]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]]
+// CHECK-RV32: newvl_store:
+// CHECK-RV32-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8
+// CHECK-RV32-NEXT: store i32 [[TMP10]], i32* [[NEW_VL]], align 2
+// CHECK-RV32-NEXT: br label [[NEWVL_END]]
+// CHECK-RV32: newvl_end:
// CHECK-RV32-NEXT: ret void
//
// CHECK-RV64-LABEL: @test_vlseg8e16ff_v_f16m1_m(
@@ -8081,8 +10296,13 @@
// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7
// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8
-// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null
+// CHECK-RV64-NEXT: br i1 [[TMP9]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]]
+// CHECK-RV64: newvl_store:
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8
+// CHECK-RV64-NEXT: store i64 [[TMP10]], i64* [[NEW_VL]], align 2
+// CHECK-RV64-NEXT: br label [[NEWVL_END]]
+// CHECK-RV64: newvl_end:
// CHECK-RV64-NEXT: ret void
//
void test_vlseg8e16ff_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, size_t *new_vl, size_t vl) {
@@ -8096,8 +10316,13 @@
// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1
// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 2
+// CHECK-RV32-NEXT: [[TMP3:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null
+// CHECK-RV32-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]]
+// CHECK-RV32: newvl_store:
+// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , i32 } [[TMP0]], 2
+// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL]], align 2
+// CHECK-RV32-NEXT: br label [[NEWVL_END]]
+// CHECK-RV32: newvl_end:
// CHECK-RV32-NEXT: ret void
//
// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_f16m2_m(
@@ -8107,8 +10332,13 @@
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1
// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null
+// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]]
+// CHECK-RV64: newvl_store:
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 2
+// CHECK-RV64-NEXT: br label [[NEWVL_END]]
+// CHECK-RV64: newvl_end:
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e16ff_v_f16m2_m (vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, size_t *new_vl, size_t vl) {
@@ -8124,8 +10354,13 @@
// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2
// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3
-// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 2
+// CHECK-RV32-NEXT: [[TMP4:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null
+// CHECK-RV32-NEXT: br i1 [[TMP4]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]]
+// CHECK-RV32: newvl_store:
+// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3
+// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL]], align 2
+// CHECK-RV32-NEXT: br label [[NEWVL_END]]
+// CHECK-RV32: newvl_end:
// CHECK-RV32-NEXT: ret void
//
// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_f16m2_m(
@@ -8137,8 +10372,13 @@
// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2
// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3
-// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null
+// CHECK-RV64-NEXT: br i1 [[TMP4]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]]
+// CHECK-RV64: newvl_store:
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL]], align 2
+// CHECK-RV64-NEXT: br label [[NEWVL_END]]
+// CHECK-RV64: newvl_end:
// CHECK-RV64-NEXT: ret void
//
void test_vlseg3e16ff_v_f16m2_m (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, size_t *new_vl, size_t vl) {
@@ -8156,8 +10396,13 @@
// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3
// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4
-// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 2
+// CHECK-RV32-NEXT: [[TMP5:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null
+// CHECK-RV32-NEXT: br i1 [[TMP5]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]]
+// CHECK-RV32: newvl_store:
+// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4
+// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL]], align 2
+// CHECK-RV32-NEXT: br label [[NEWVL_END]]
+// CHECK-RV32: newvl_end:
// CHECK-RV32-NEXT: ret void
//
// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_f16m2_m(
@@ -8171,8 +10416,13 @@
// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3
// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4
-// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null
+// CHECK-RV64-NEXT: br i1 [[TMP5]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]]
+// CHECK-RV64: newvl_store:
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL]], align 2
+// CHECK-RV64-NEXT: br label [[NEWVL_END]]
+// CHECK-RV64: newvl_end:
// CHECK-RV64-NEXT: ret void
//
void test_vlseg4e16ff_v_f16m2_m (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, size_t *new_vl, size_t vl) {
@@ -8186,8 +10436,13 @@
// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1
// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 2
+// CHECK-RV32-NEXT: [[TMP3:%.*]] = icmp ne i32* [[NEW_VL:%.*]], null
+// CHECK-RV32-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]]
+// CHECK-RV32: newvl_store:
+// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , i32 } [[TMP0]], 2
+// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL]], align 2
+// CHECK-RV32-NEXT: br label [[NEWVL_END]]
+// CHECK-RV32: newvl_end:
// CHECK-RV32-NEXT: ret void
//
// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_f16m4_m(
@@ -8197,8 +10452,13 @@
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1
// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2
-// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null
+// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]]
+// CHECK-RV64: newvl_store:
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 2
+// CHECK-RV64-NEXT: br label [[NEWVL_END]]
+// CHECK-RV64: newvl_end:
// CHECK-RV64-NEXT: ret void
//
void test_vlseg2e16ff_v_f16m4_m (vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const _Float16 *base, size_t *new_vl, size_t vl) {