diff --git a/clang/include/clang/Basic/riscv_vector.td b/clang/include/clang/Basic/riscv_vector.td
--- a/clang/include/clang/Basic/riscv_vector.td
+++ b/clang/include/clang/Basic/riscv_vector.td
@@ -643,10 +643,20 @@
         llvm::Value *LoadValue = Builder.CreateCall(F, Ops, "");
         llvm::Value *V = Builder.CreateExtractValue(LoadValue, {0});
         // Store new_vl.
+        llvm::Value *Cmp =
+            Builder.CreateCmp(llvm::CmpInst::ICMP_NE, NewVL,
+                              ConstantPointerNull::get(
+                                  cast<llvm::PointerType>(NewVL->getType())));
+        BasicBlock *NewVLStore = createBasicBlock("newvl_store", this->CurFn);
+        BasicBlock *NewVLEnd = createBasicBlock("newvl_end", this->CurFn);
+        Builder.CreateCondBr(Cmp, NewVLStore, NewVLEnd);
+        Builder.SetInsertPoint(NewVLStore);
         clang::CharUnits Align =
             CGM.getNaturalPointeeTypeAlignment(E->getArg(1)->getType());
         llvm::Value *Val = Builder.CreateExtractValue(LoadValue, {1});
         Builder.CreateStore(Val, Address(NewVL, Val->getType(), Align));
+        Builder.CreateBr(NewVLEnd);
+        Builder.SetInsertPoint(NewVLEnd);
         return V;
       }
     }],
@@ -663,10 +673,20 @@
         llvm::Value *LoadValue = Builder.CreateCall(F, Ops, "");
         llvm::Value *V = Builder.CreateExtractValue(LoadValue, {0});
         // Store new_vl.
+        llvm::Value *Cmp =
+            Builder.CreateCmp(llvm::CmpInst::ICMP_NE, NewVL,
+                              ConstantPointerNull::get(
+                                  cast<llvm::PointerType>(NewVL->getType())));
+        BasicBlock *NewVLStore = createBasicBlock("newvl_store", this->CurFn);
+        BasicBlock *NewVLEnd = createBasicBlock("newvl_end", this->CurFn);
+        Builder.CreateCondBr(Cmp, NewVLStore, NewVLEnd);
+        Builder.SetInsertPoint(NewVLStore);
         clang::CharUnits Align =
             CGM.getNaturalPointeeTypeAlignment(E->getArg(3)->getType());
         llvm::Value *Val = Builder.CreateExtractValue(LoadValue, {1});
         Builder.CreateStore(Val, Address(NewVL, Val->getType(), Align));
+        Builder.CreateBr(NewVLEnd);
+        Builder.SetInsertPoint(NewVLEnd);
         return V;
       }
     }] in {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vleff-optimized.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vleff-optimized.c
new file mode 100644
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vleff-optimized.c
@@ -0,0 +1,240 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -no-opaque-pointers -triple riscv64 -target-feature +f -target-feature +d \
+// RUN:   -target-feature +v -target-feature +zfh -target-feature +experimental-zvfh \
+// RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64-O0 %s
+// RUN: %clang_cc1 -no-opaque-pointers -triple riscv64 -target-feature +f -target-feature +d \
+// RUN:   -target-feature +v -target-feature +zfh -target-feature +experimental-zvfh \
+// RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -O2 | FileCheck --check-prefix=CHECK-RV64-O2 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-O0-LABEL: @func_return_nullptr(
+// CHECK-RV64-O0-NEXT:  entry:
+// CHECK-RV64-O0-NEXT:    ret i64* null
+//
+// CHECK-RV64-O2-LABEL: @func_return_nullptr(
+// CHECK-RV64-O2-NEXT:  entry:
+// CHECK-RV64-O2-NEXT:    ret i64* null
+//
+size_t *func_return_nullptr() {
+  return NULL;
+}
+
+// CHECK-RV64-O0-LABEL: @test_vleff_save_new_vl_to_not_null(
+// CHECK-RV64-O0-NEXT:  entry:
+// CHECK-RV64-O0-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV64-O0-NEXT:    [[TMP1:%.*]] = call { <vscale x 1 x i8>, i64 } @llvm.riscv.vleff.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8>* [[TMP0]], i64 [[VL:%.*]])
+// CHECK-RV64-O0-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, i64 } [[TMP1]], 0
+// CHECK-RV64-O0-NEXT:    [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null
+// CHECK-RV64-O0-NEXT:    br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]]
+// CHECK-RV64-O0:       newvl_store:
+// CHECK-RV64-O0-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, i64 } [[TMP1]], 1
+// CHECK-RV64-O0-NEXT:    store i64 [[TMP4]], i64* [[NEW_VL]], align 8
+// CHECK-RV64-O0-NEXT:    br label [[NEWVL_END]]
+// CHECK-RV64-O0:       newvl_end:
+// CHECK-RV64-O0-NEXT:    ret <vscale x 1 x i8> [[TMP2]]
+//
+// CHECK-RV64-O2-LABEL: @test_vleff_save_new_vl_to_not_null(
+// CHECK-RV64-O2-NEXT:  entry:
+// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV64-O2-NEXT:    [[TMP1:%.*]] = tail call { <vscale x 1 x i8>, i64 } @llvm.riscv.vleff.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8>* [[TMP0]], i64 [[VL:%.*]])
+// CHECK-RV64-O2-NEXT:    [[DOTNOT:%.*]] = icmp eq i64* [[NEW_VL:%.*]], null
+// CHECK-RV64-O2-NEXT:    br i1 [[DOTNOT]], label [[NEWVL_END:%.*]], label [[NEWVL_STORE:%.*]]
+// CHECK-RV64-O2:       newvl_store:
+// CHECK-RV64-O2-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, i64 } [[TMP1]], 1
+// CHECK-RV64-O2-NEXT:    store i64 [[TMP2]], i64* [[NEW_VL]], align 8
+// CHECK-RV64-O2-NEXT:    br label [[NEWVL_END]]
+// CHECK-RV64-O2:       newvl_end:
+// CHECK-RV64-O2-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, i64 } [[TMP1]], 0
+// CHECK-RV64-O2-NEXT:    ret <vscale x 1 x i8> [[TMP3]]
+//
+vint8mf8_t test_vleff_save_new_vl_to_not_null(const int8_t *base, size_t *new_vl, size_t vl) {
+  return vle8ff_v_i8mf8(base, new_vl, vl);
+}
+
+// CHECK-RV64-O0-LABEL: @test_vleff_save_new_vl_to_not_null_m(
+// CHECK-RV64-O0-NEXT:  entry:
+// CHECK-RV64-O0-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV64-O0-NEXT:    [[TMP1:%.*]] = call { <vscale x 1 x i8>, i64 } @llvm.riscv.vleff.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-O0-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, i64 } [[TMP1]], 0
+// CHECK-RV64-O0-NEXT:    [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null
+// CHECK-RV64-O0-NEXT:    br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]]
+// CHECK-RV64-O0:       newvl_store:
+// CHECK-RV64-O0-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, i64 } [[TMP1]], 1
+// CHECK-RV64-O0-NEXT:    store i64 [[TMP4]], i64* [[NEW_VL]], align 8
+// CHECK-RV64-O0-NEXT:    br label [[NEWVL_END]]
+// CHECK-RV64-O0:       newvl_end:
+// CHECK-RV64-O0-NEXT:    ret <vscale x 1 x i8> [[TMP2]]
+//
+// CHECK-RV64-O2-LABEL: @test_vleff_save_new_vl_to_not_null_m(
+// CHECK-RV64-O2-NEXT:  entry:
+// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV64-O2-NEXT:    [[TMP1:%.*]] = tail call { <vscale x 1 x i8>, i64 } @llvm.riscv.vleff.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-O2-NEXT:    [[DOTNOT:%.*]] = icmp eq i64* [[NEW_VL:%.*]], null
+// CHECK-RV64-O2-NEXT:    br i1 [[DOTNOT]], label [[NEWVL_END:%.*]], label [[NEWVL_STORE:%.*]]
+// CHECK-RV64-O2:       newvl_store:
+// CHECK-RV64-O2-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, i64 } [[TMP1]], 1
+// CHECK-RV64-O2-NEXT:    store i64 [[TMP2]], i64* [[NEW_VL]], align 8
+// CHECK-RV64-O2-NEXT:    br label [[NEWVL_END]]
+// CHECK-RV64-O2:       newvl_end:
+// CHECK-RV64-O2-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, i64 } [[TMP1]], 0
+// CHECK-RV64-O2-NEXT:    ret <vscale x 1 x i8> [[TMP3]]
+//
+vint8mf8_t test_vleff_save_new_vl_to_not_null_m(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, size_t *new_vl, size_t vl) {
+  return vle8ff_v_i8mf8_m(mask, maskedoff, base, new_vl, vl);
+}
+
+// CHECK-RV64-O0-LABEL: @test_vleff_save_new_vl_to_direct_nullptr(
+// CHECK-RV64-O0-NEXT:  entry:
+// CHECK-RV64-O0-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV64-O0-NEXT:    [[TMP1:%.*]] = call { <vscale x 1 x i8>, i64 } @llvm.riscv.vleff.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8>* [[TMP0]], i64 [[VL:%.*]])
+// CHECK-RV64-O0-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, i64 } [[TMP1]], 0
+// CHECK-RV64-O0-NEXT:    br i1 false, label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]]
+// CHECK-RV64-O0:       newvl_store:
+// CHECK-RV64-O0-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, i64 } [[TMP1]], 1
+// CHECK-RV64-O0-NEXT:    store i64 [[TMP3]], i64* null, align 8
+// CHECK-RV64-O0-NEXT:    br label [[NEWVL_END]]
+// CHECK-RV64-O0:       newvl_end:
+// CHECK-RV64-O0-NEXT:    ret <vscale x 1 x i8> [[TMP2]]
+//
+// CHECK-RV64-O2-LABEL: @test_vleff_save_new_vl_to_direct_nullptr(
+// CHECK-RV64-O2-NEXT:  entry:
+// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV64-O2-NEXT:    [[TMP1:%.*]] = tail call { <vscale x 1 x i8>, i64 } @llvm.riscv.vleff.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8>* [[TMP0]], i64 [[VL:%.*]])
+// CHECK-RV64-O2-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, i64 } [[TMP1]], 0
+// CHECK-RV64-O2-NEXT:    ret <vscale x 1 x i8> [[TMP2]]
+//
+vint8mf8_t test_vleff_save_new_vl_to_direct_nullptr(const int8_t *base, size_t vl) {
+  return vle8ff_v_i8mf8(base, NULL, vl);
+}
+
+// CHECK-RV64-O0-LABEL: @test_vleff_save_new_vl_to_direct_nullptr_m(
+// CHECK-RV64-O0-NEXT:  entry:
+// CHECK-RV64-O0-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV64-O0-NEXT:    [[TMP1:%.*]] = call { <vscale x 1 x i8>, i64 } @llvm.riscv.vleff.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-O0-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, i64 } [[TMP1]], 0
+// CHECK-RV64-O0-NEXT:    br i1 false, label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]]
+// CHECK-RV64-O0:       newvl_store:
+// CHECK-RV64-O0-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, i64 } [[TMP1]], 1
+// CHECK-RV64-O0-NEXT:    store i64 [[TMP3]], i64* null, align 8
+// CHECK-RV64-O0-NEXT:    br label [[NEWVL_END]]
+// CHECK-RV64-O0:       newvl_end:
+// CHECK-RV64-O0-NEXT:    ret <vscale x 1 x i8> [[TMP2]]
+//
+// CHECK-RV64-O2-LABEL: @test_vleff_save_new_vl_to_direct_nullptr_m(
+// CHECK-RV64-O2-NEXT:  entry:
+// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV64-O2-NEXT:    [[TMP1:%.*]] = tail call { <vscale x 1 x i8>, i64 } @llvm.riscv.vleff.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-O2-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, i64 } [[TMP1]], 0
+// CHECK-RV64-O2-NEXT:    ret <vscale x 1 x i8> [[TMP2]]
+//
+vint8mf8_t test_vleff_save_new_vl_to_direct_nullptr_m(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, size_t vl) {
+  return vle8ff_v_i8mf8_m(mask, maskedoff, base, NULL, vl);
+}
+
+// CHECK-RV64-O0-LABEL: @test_vleff_save_new_vl_to_indirect_nullptr(
+// CHECK-RV64-O0-NEXT:  entry:
+// CHECK-RV64-O0-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV64-O0-NEXT:    [[TMP1:%.*]] = call { <vscale x 1 x i8>, i64 } @llvm.riscv.vleff.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8>* [[TMP0]], i64 [[VL:%.*]])
+// CHECK-RV64-O0-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, i64 } [[TMP1]], 0
+// CHECK-RV64-O0-NEXT:    [[TMP3:%.*]] = icmp ne i64* null, null
+// CHECK-RV64-O0-NEXT:    br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]]
+// CHECK-RV64-O0:       newvl_store:
+// CHECK-RV64-O0-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, i64 } [[TMP1]], 1
+// CHECK-RV64-O0-NEXT:    store i64 [[TMP4]], i64* null, align 8
+// CHECK-RV64-O0-NEXT:    br label [[NEWVL_END]]
+// CHECK-RV64-O0:       newvl_end:
+// CHECK-RV64-O0-NEXT:    ret <vscale x 1 x i8> [[TMP2]]
+//
+// CHECK-RV64-O2-LABEL: @test_vleff_save_new_vl_to_indirect_nullptr(
+// CHECK-RV64-O2-NEXT:  entry:
+// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV64-O2-NEXT:    [[TMP1:%.*]] = tail call { <vscale x 1 x i8>, i64 } @llvm.riscv.vleff.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8>* [[TMP0]], i64 [[VL:%.*]])
+// CHECK-RV64-O2-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, i64 } [[TMP1]], 0
+// CHECK-RV64-O2-NEXT:    ret <vscale x 1 x i8> [[TMP2]]
+//
+vint8mf8_t test_vleff_save_new_vl_to_indirect_nullptr(const int8_t *base, size_t vl) {
+  size_t *new_vl = NULL;
+  return vle8ff_v_i8mf8(base, new_vl, vl);
+}
+
+// CHECK-RV64-O0-LABEL: @test_vleff_save_new_vl_to_indirect_nullptr_m(
+// CHECK-RV64-O0-NEXT:  entry:
+// CHECK-RV64-O0-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV64-O0-NEXT:    [[TMP1:%.*]] = call { <vscale x 1 x i8>, i64 } @llvm.riscv.vleff.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-O0-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, i64 } [[TMP1]], 0
+// CHECK-RV64-O0-NEXT:    [[TMP3:%.*]] = icmp ne i64* null, null
+// CHECK-RV64-O0-NEXT:    br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]]
+// CHECK-RV64-O0:       newvl_store:
+// CHECK-RV64-O0-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, i64 } [[TMP1]], 1
+// CHECK-RV64-O0-NEXT:    store i64 [[TMP4]], i64* null, align 8
+// CHECK-RV64-O0-NEXT:    br label [[NEWVL_END]]
+// CHECK-RV64-O0:       newvl_end:
+// CHECK-RV64-O0-NEXT:    ret <vscale x 1 x i8> [[TMP2]]
+//
+// CHECK-RV64-O2-LABEL: @test_vleff_save_new_vl_to_indirect_nullptr_m(
+// CHECK-RV64-O2-NEXT:  entry:
+// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV64-O2-NEXT:    [[TMP1:%.*]] = tail call { <vscale x 1 x i8>, i64 } @llvm.riscv.vleff.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-O2-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, i64 } [[TMP1]], 0
+// CHECK-RV64-O2-NEXT:    ret <vscale x 1 x i8> [[TMP2]]
+//
+vint8mf8_t test_vleff_save_new_vl_to_indirect_nullptr_m(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, size_t vl) {
+  size_t *new_vl = NULL;
+  return vle8ff_v_i8mf8_m(mask, maskedoff, base, new_vl, vl);
+}
+
+// CHECK-RV64-O0-LABEL: @test_vleff_save_new_vl_to_cross_func_nullptr(
+// CHECK-RV64-O0-NEXT:  entry:
+// CHECK-RV64-O0-NEXT:    [[CALL:%.*]] = call i64* @func_return_nullptr()
+// CHECK-RV64-O0-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV64-O0-NEXT:    [[TMP1:%.*]] = call { <vscale x 1 x i8>, i64 } @llvm.riscv.vleff.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8>* [[TMP0]], i64 [[VL:%.*]])
+// CHECK-RV64-O0-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, i64 } [[TMP1]], 0
+// CHECK-RV64-O0-NEXT:    [[TMP3:%.*]] = icmp ne i64* [[CALL]], null
+// CHECK-RV64-O0-NEXT:    br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]]
+// CHECK-RV64-O0:       newvl_store:
+// CHECK-RV64-O0-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, i64 } [[TMP1]], 1
+// CHECK-RV64-O0-NEXT:    store i64 [[TMP4]], i64* [[CALL]], align 8
+// CHECK-RV64-O0-NEXT:    br label [[NEWVL_END]]
+// CHECK-RV64-O0:       newvl_end:
+// CHECK-RV64-O0-NEXT:    ret <vscale x 1 x i8> [[TMP2]]
+//
+// CHECK-RV64-O2-LABEL: @test_vleff_save_new_vl_to_cross_func_nullptr(
+// CHECK-RV64-O2-NEXT:  entry:
+// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV64-O2-NEXT:    [[TMP1:%.*]] = tail call { <vscale x 1 x i8>, i64 } @llvm.riscv.vleff.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8>* [[TMP0]], i64 [[VL:%.*]])
+// CHECK-RV64-O2-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, i64 } [[TMP1]], 0
+// CHECK-RV64-O2-NEXT:    ret <vscale x 1 x i8> [[TMP2]]
+//
+vint8mf8_t test_vleff_save_new_vl_to_cross_func_nullptr(const int8_t *base, size_t vl) {
+  size_t *new_vl = func_return_nullptr();
+  return vle8ff_v_i8mf8(base, new_vl, vl);
+}
+
+// CHECK-RV64-O0-LABEL: @test_vleff_save_new_vl_to_cross_func_nullptr_m(
+// CHECK-RV64-O0-NEXT:  entry:
+// CHECK-RV64-O0-NEXT:    [[CALL:%.*]] = call i64* @func_return_nullptr()
+// CHECK-RV64-O0-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV64-O0-NEXT:    [[TMP1:%.*]] = call { <vscale x 1 x i8>, i64 } @llvm.riscv.vleff.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-O0-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, i64 } [[TMP1]], 0
+// CHECK-RV64-O0-NEXT:    [[TMP3:%.*]] = icmp ne i64* [[CALL]], null
+// CHECK-RV64-O0-NEXT:    br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]]
+// CHECK-RV64-O0:       newvl_store:
+// CHECK-RV64-O0-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, i64 } [[TMP1]], 1
+// CHECK-RV64-O0-NEXT:    store i64 [[TMP4]], i64* [[CALL]], align 8
+// CHECK-RV64-O0-NEXT:    br label [[NEWVL_END]]
+// CHECK-RV64-O0:       newvl_end:
+// CHECK-RV64-O0-NEXT:    ret <vscale x 1 x i8> [[TMP2]]
+//
+// CHECK-RV64-O2-LABEL: @test_vleff_save_new_vl_to_cross_func_nullptr_m(
+// CHECK-RV64-O2-NEXT:  entry:
+// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV64-O2-NEXT:    [[TMP1:%.*]] = tail call { <vscale x 1 x i8>, i64 } @llvm.riscv.vleff.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-O2-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, i64 } [[TMP1]], 0
+// CHECK-RV64-O2-NEXT:    ret <vscale x 1 x i8> [[TMP2]]
+//
+vint8mf8_t test_vleff_save_new_vl_to_cross_func_nullptr_m(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, size_t vl) {
+  size_t *new_vl = func_return_nullptr();
+  return vle8ff_v_i8mf8_m(mask, maskedoff, base, new_vl, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vleff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vleff.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vleff.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vleff.c
@@ -11,8 +11,13 @@
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call { <vscale x 1 x i8>, i64 } @llvm.riscv.vleff.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8>* [[TMP0]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, i64 } [[TMP1]], 0
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, i64 } [[TMP1]], 1
-// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null
+// CHECK-RV64-NEXT:    br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]]
+// CHECK-RV64:       newvl_store:
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, i64 } [[TMP1]], 1
+// CHECK-RV64-NEXT:    store i64 [[TMP4]], i64* [[NEW_VL]], align 8
+// CHECK-RV64-NEXT:    br label [[NEWVL_END]]
+// CHECK-RV64:       newvl_end:
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP2]]
 //
 vint8mf8_t test_vle8ff_v_i8mf8 (const int8_t *base, size_t *new_vl, size_t vl) {
@@ -24,8 +29,13 @@
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call { <vscale x 2 x i8>, i64 } @llvm.riscv.vleff.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8>* [[TMP0]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, i64 } [[TMP1]], 0
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, i64 } [[TMP1]], 1
-// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null
+// CHECK-RV64-NEXT:    br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]]
+// CHECK-RV64:       newvl_store:
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i8>, i64 } [[TMP1]], 1
+// CHECK-RV64-NEXT:    store i64 [[TMP4]], i64* [[NEW_VL]], align 8
+// CHECK-RV64-NEXT:    br label [[NEWVL_END]]
+// CHECK-RV64:       newvl_end:
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP2]]
 //
 vint8mf4_t test_vle8ff_v_i8mf4 (const int8_t *base, size_t *new_vl, size_t vl) {
@@ -37,8 +47,13 @@
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call { <vscale x 4 x i8>, i64 } @llvm.riscv.vleff.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8>* [[TMP0]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    [[TMP2:%.*]] =
extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vint8mf2_t test_vle8ff_v_i8mf2 (const int8_t *base, size_t *new_vl, size_t vl) { @@ -50,8 +65,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv8i8.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vint8m1_t test_vle8ff_v_i8m1 (const int8_t *base, size_t *new_vl, size_t vl) { @@ -63,8 +83,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv16i8.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vint8m2_t test_vle8ff_v_i8m2 (const int8_t *base, size_t *new_vl, size_t vl) { @@ -76,8 +101,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv32i8.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vint8m4_t test_vle8ff_v_i8m4 (const int8_t *base, size_t *new_vl, size_t vl) { @@ -89,8 +119,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // 
CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv64i8.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vint8m8_t test_vle8ff_v_i8m8 (const int8_t *base, size_t *new_vl, size_t vl) { @@ -102,8 +137,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv1i8.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vuint8mf8_t test_vle8ff_v_u8mf8 (const uint8_t *base, size_t *new_vl, size_t vl) { @@ -115,8 +155,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv2i8.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vuint8mf4_t test_vle8ff_v_u8mf4 (const uint8_t *base, size_t *new_vl, size_t vl) { @@ -128,8 +173,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv4i8.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vuint8mf2_t 
test_vle8ff_v_u8mf2 (const uint8_t *base, size_t *new_vl, size_t vl) { @@ -141,8 +191,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv8i8.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vuint8m1_t test_vle8ff_v_u8m1 (const uint8_t *base, size_t *new_vl, size_t vl) { @@ -154,8 +209,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv16i8.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vuint8m2_t test_vle8ff_v_u8m2 (const uint8_t *base, size_t *new_vl, size_t vl) { @@ -167,8 +227,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv32i8.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vuint8m4_t test_vle8ff_v_u8m4 (const uint8_t *base, size_t *new_vl, size_t vl) { @@ -180,8 +245,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv64i8.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 
[[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vuint8m8_t test_vle8ff_v_u8m8 (const uint8_t *base, size_t *new_vl, size_t vl) { @@ -193,8 +263,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vint8mf8_t test_vle8ff_v_i8mf8_m (vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, size_t *new_vl, size_t vl) { @@ -206,8 +281,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vint8mf4_t test_vle8ff_v_i8mf4_m (vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, size_t *new_vl, size_t vl) { @@ -219,8 +299,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vint8mf2_t test_vle8ff_v_i8mf2_m (vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, size_t *new_vl, size_t vl) { @@ -232,8 +317,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// 
CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vint8m1_t test_vle8ff_v_i8m1_m (vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, size_t *new_vl, size_t vl) { @@ -245,8 +335,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vint8m2_t test_vle8ff_v_i8m2_m (vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, size_t *new_vl, size_t vl) { @@ -258,8 +353,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vint8m4_t test_vle8ff_v_i8m4_m (vbool2_t mask, vint8m4_t maskedoff, const int8_t *base, size_t *new_vl, size_t vl) { @@ -271,8 +371,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv64i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret 
[[TMP2]] // vint8m8_t test_vle8ff_v_i8m8_m (vbool1_t mask, vint8m8_t maskedoff, const int8_t *base, size_t *new_vl, size_t vl) { @@ -284,8 +389,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vuint8mf8_t test_vle8ff_v_u8mf8_m (vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, size_t *new_vl, size_t vl) { @@ -297,8 +407,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vuint8mf4_t test_vle8ff_v_u8mf4_m (vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, size_t *new_vl, size_t vl) { @@ -310,8 +425,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vuint8mf2_t test_vle8ff_v_u8mf2_m (vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, size_t *new_vl, size_t vl) { @@ -323,8 +443,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 
[[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vuint8m1_t test_vle8ff_v_u8m1_m (vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, size_t *new_vl, size_t vl) { @@ -336,8 +461,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vuint8m2_t test_vle8ff_v_u8m2_m (vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, size_t *new_vl, size_t vl) { @@ -349,8 +479,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vuint8m4_t test_vle8ff_v_u8m4_m (vbool2_t mask, vuint8m4_t maskedoff, const uint8_t *base, size_t *new_vl, size_t vl) { @@ -362,8 +497,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv64i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vuint8m8_t test_vle8ff_v_u8m8_m (vbool1_t mask, vuint8m8_t maskedoff, const 
uint8_t *base, size_t *new_vl, size_t vl) { @@ -375,8 +515,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv1i16.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vint16mf4_t test_vle16ff_v_i16mf4 (const int16_t *base, size_t *new_vl, size_t vl) { @@ -388,8 +533,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv2i16.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vint16mf2_t test_vle16ff_v_i16mf2 (const int16_t *base, size_t *new_vl, size_t vl) { @@ -401,8 +551,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv4i16.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vint16m1_t test_vle16ff_v_i16m1 (const int16_t *base, size_t *new_vl, size_t vl) { @@ -414,8 +569,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv8i16.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* 
[[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vint16m2_t test_vle16ff_v_i16m2 (const int16_t *base, size_t *new_vl, size_t vl) { @@ -427,8 +587,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv16i16.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vint16m4_t test_vle16ff_v_i16m4 (const int16_t *base, size_t *new_vl, size_t vl) { @@ -440,8 +605,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv32i16.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vint16m8_t test_vle16ff_v_i16m8 (const int16_t *base, size_t *new_vl, size_t vl) { @@ -453,8 +623,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv1i16.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vuint16mf4_t test_vle16ff_v_u16mf4 (const uint16_t *base, size_t *new_vl, size_t vl) { @@ -466,8 +641,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv2i16.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label 
[[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vuint16mf2_t test_vle16ff_v_u16mf2 (const uint16_t *base, size_t *new_vl, size_t vl) { @@ -479,8 +659,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv4i16.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vuint16m1_t test_vle16ff_v_u16m1 (const uint16_t *base, size_t *new_vl, size_t vl) { @@ -492,8 +677,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv8i16.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vuint16m2_t test_vle16ff_v_u16m2 (const uint16_t *base, size_t *new_vl, size_t vl) { @@ -505,8 +695,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv16i16.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vuint16m4_t test_vle16ff_v_u16m4 (const uint16_t *base, size_t *new_vl, size_t vl) { @@ -518,8 +713,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv32i16.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], 
i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vuint16m8_t test_vle16ff_v_u16m8 (const uint16_t *base, size_t *new_vl, size_t vl) { @@ -531,8 +731,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vint16mf4_t test_vle16ff_v_i16mf4_m (vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, size_t *new_vl, size_t vl) { @@ -544,8 +749,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vint16mf2_t test_vle16ff_v_i16mf2_m (vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, size_t *new_vl, size_t vl) { @@ -557,8 +767,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vint16m1_t test_vle16ff_v_i16m1_m (vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, size_t 
*new_vl, size_t vl) { @@ -570,8 +785,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vint16m2_t test_vle16ff_v_i16m2_m (vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, size_t *new_vl, size_t vl) { @@ -583,8 +803,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vint16m4_t test_vle16ff_v_i16m4_m (vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, size_t *new_vl, size_t vl) { @@ -596,8 +821,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vint16m8_t test_vle16ff_v_i16m8_m (vbool2_t mask, vint16m8_t maskedoff, const int16_t *base, size_t *new_vl, size_t vl) { @@ -609,8 +839,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* 
[[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vuint16mf4_t test_vle16ff_v_u16mf4_m (vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, size_t *new_vl, size_t vl) { @@ -622,8 +857,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vuint16mf2_t test_vle16ff_v_u16mf2_m (vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, size_t *new_vl, size_t vl) { @@ -635,8 +875,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vuint16m1_t test_vle16ff_v_u16m1_m (vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, size_t *new_vl, size_t vl) { @@ -648,8 +893,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vuint16m2_t test_vle16ff_v_u16m2_m (vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, size_t *new_vl, size_t vl) { @@ -661,8 
+911,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vuint16m4_t test_vle16ff_v_u16m4_m (vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, size_t *new_vl, size_t vl) { @@ -674,8 +929,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vuint16m8_t test_vle16ff_v_u16m8_m (vbool2_t mask, vuint16m8_t maskedoff, const uint16_t *base, size_t *new_vl, size_t vl) { @@ -687,8 +947,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv1i32.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vint32mf2_t test_vle32ff_v_i32mf2 (const int32_t *base, size_t *new_vl, size_t vl) { @@ -700,8 +965,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv2i32.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// 
CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vint32m1_t test_vle32ff_v_i32m1 (const int32_t *base, size_t *new_vl, size_t vl) { @@ -713,8 +983,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv4i32.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vint32m2_t test_vle32ff_v_i32m2 (const int32_t *base, size_t *new_vl, size_t vl) { @@ -726,8 +1001,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv8i32.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vint32m4_t test_vle32ff_v_i32m4 (const int32_t *base, size_t *new_vl, size_t vl) { @@ -739,8 +1019,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv16i32.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vint32m8_t test_vle32ff_v_i32m8 (const int32_t *base, size_t *new_vl, size_t vl) { @@ -752,8 +1037,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv1i32.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: 
[[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vuint32mf2_t test_vle32ff_v_u32mf2 (const uint32_t *base, size_t *new_vl, size_t vl) { @@ -765,8 +1055,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv2i32.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vuint32m1_t test_vle32ff_v_u32m1 (const uint32_t *base, size_t *new_vl, size_t vl) { @@ -778,8 +1073,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv4i32.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vuint32m2_t test_vle32ff_v_u32m2 (const uint32_t *base, size_t *new_vl, size_t vl) { @@ -791,8 +1091,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv8i32.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vuint32m4_t test_vle32ff_v_u32m4 (const uint32_t *base, size_t *new_vl, size_t vl) { @@ -804,8 +1109,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv16i32.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } 
[[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vuint32m8_t test_vle32ff_v_u32m8 (const uint32_t *base, size_t *new_vl, size_t vl) { @@ -817,8 +1127,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv1f32.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vfloat32mf2_t test_vle32ff_v_f32mf2 (const float *base, size_t *new_vl, size_t vl) { @@ -830,8 +1145,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv2f32.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vfloat32m1_t test_vle32ff_v_f32m1 (const float *base, size_t *new_vl, size_t vl) { @@ -843,8 +1163,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv4f32.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vfloat32m2_t test_vle32ff_v_f32m2 (const float *base, size_t *new_vl, size_t vl) { @@ -856,8 +1181,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * 
// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv8f32.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vfloat32m4_t test_vle32ff_v_f32m4 (const float *base, size_t *new_vl, size_t vl) { @@ -869,8 +1199,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv16f32.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vfloat32m8_t test_vle32ff_v_f32m8 (const float *base, size_t *new_vl, size_t vl) { @@ -882,8 +1217,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vint32mf2_t test_vle32ff_v_i32mf2_m (vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, size_t *new_vl, size_t vl) { @@ -895,8 +1235,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], 
align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vint32m1_t test_vle32ff_v_i32m1_m (vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, size_t *new_vl, size_t vl) { @@ -908,8 +1253,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vint32m2_t test_vle32ff_v_i32m2_m (vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, size_t *new_vl, size_t vl) { @@ -921,8 +1271,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vint32m4_t test_vle32ff_v_i32m4_m (vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, size_t *new_vl, size_t vl) { @@ -934,8 +1289,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vint32m8_t test_vle32ff_v_i32m8_m (vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, size_t *new_vl, size_t vl) { @@ -947,8 +1307,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , 
i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vuint32mf2_t test_vle32ff_v_u32mf2_m (vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, size_t *new_vl, size_t vl) { @@ -960,8 +1325,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vuint32m1_t test_vle32ff_v_u32m1_m (vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, size_t *new_vl, size_t vl) { @@ -973,8 +1343,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vuint32m2_t test_vle32ff_v_u32m2_m (vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, size_t *new_vl, size_t vl) { @@ -986,8 +1361,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label 
[[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vuint32m4_t test_vle32ff_v_u32m4_m (vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, size_t *new_vl, size_t vl) { @@ -999,8 +1379,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vuint32m8_t test_vle32ff_v_u32m8_m (vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, size_t *new_vl, size_t vl) { @@ -1012,8 +1397,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv1f32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vfloat32mf2_t test_vle32ff_v_f32mf2_m (vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, size_t *new_vl, size_t vl) { @@ -1025,8 +1415,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv2f32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vfloat32m1_t test_vle32ff_v_f32m1_m (vbool32_t mask, vfloat32m1_t maskedoff, const float *base, size_t *new_vl, size_t vl) { @@ -1038,8 +1433,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv4f32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vfloat32m2_t test_vle32ff_v_f32m2_m (vbool16_t mask, vfloat32m2_t maskedoff, const float *base, size_t *new_vl, size_t vl) { @@ -1051,8 +1451,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv8f32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vfloat32m4_t test_vle32ff_v_f32m4_m (vbool8_t mask, vfloat32m4_t maskedoff, const float *base, size_t *new_vl, size_t vl) { @@ -1064,8 +1469,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv16f32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vfloat32m8_t test_vle32ff_v_f32m8_m (vbool4_t mask, vfloat32m8_t maskedoff, const float *base, size_t *new_vl, size_t vl) { @@ -1077,8 +1487,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv1i64.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret 
[[TMP2]] // vint64m1_t test_vle64ff_v_i64m1 (const int64_t *base, size_t *new_vl, size_t vl) { @@ -1090,8 +1505,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv2i64.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vint64m2_t test_vle64ff_v_i64m2 (const int64_t *base, size_t *new_vl, size_t vl) { @@ -1103,8 +1523,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv4i64.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vint64m4_t test_vle64ff_v_i64m4 (const int64_t *base, size_t *new_vl, size_t vl) { @@ -1116,8 +1541,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv8i64.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vint64m8_t test_vle64ff_v_i64m8 (const int64_t *base, size_t *new_vl, size_t vl) { @@ -1129,8 +1559,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv1i64.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } 
[[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vuint64m1_t test_vle64ff_v_u64m1 (const uint64_t *base, size_t *new_vl, size_t vl) { @@ -1142,8 +1577,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv2i64.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vuint64m2_t test_vle64ff_v_u64m2 (const uint64_t *base, size_t *new_vl, size_t vl) { @@ -1155,8 +1595,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv4i64.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vuint64m4_t test_vle64ff_v_u64m4 (const uint64_t *base, size_t *new_vl, size_t vl) { @@ -1168,8 +1613,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv8i64.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vuint64m8_t test_vle64ff_v_u64m8 (const uint64_t *base, size_t *new_vl, size_t vl) { @@ -1181,8 +1631,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv1f64.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null 
+// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vfloat64m1_t test_vle64ff_v_f64m1 (const double *base, size_t *new_vl, size_t vl) { @@ -1194,8 +1649,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv2f64.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vfloat64m2_t test_vle64ff_v_f64m2 (const double *base, size_t *new_vl, size_t vl) { @@ -1207,8 +1667,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv4f64.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vfloat64m4_t test_vle64ff_v_f64m4 (const double *base, size_t *new_vl, size_t vl) { @@ -1220,8 +1685,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv8f64.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vfloat64m8_t test_vle64ff_v_f64m8 (const double *base, size_t *new_vl, size_t vl) { @@ -1233,8 +1703,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } 
[[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vint64m1_t test_vle64ff_v_i64m1_m (vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, size_t *new_vl, size_t vl) { @@ -1246,8 +1721,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vint64m2_t test_vle64ff_v_i64m2_m (vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, size_t *new_vl, size_t vl) { @@ -1259,8 +1739,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vint64m4_t test_vle64ff_v_i64m4_m (vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, size_t *new_vl, size_t vl) { @@ -1272,8 +1757,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// 
CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vint64m8_t test_vle64ff_v_i64m8_m (vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, size_t *new_vl, size_t vl) { @@ -1285,8 +1775,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vuint64m1_t test_vle64ff_v_u64m1_m (vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, size_t *new_vl, size_t vl) { @@ -1298,8 +1793,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vuint64m2_t test_vle64ff_v_u64m2_m (vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, size_t *new_vl, size_t vl) { @@ -1311,8 +1811,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 -// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null +// CHECK-RV64-NEXT: br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]] +// CHECK-RV64: newvl_store: +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL]], align 8 +// CHECK-RV64-NEXT: br label [[NEWVL_END]] +// CHECK-RV64: newvl_end: // CHECK-RV64-NEXT: ret [[TMP2]] // vuint64m4_t test_vle64ff_v_u64m4_m (vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, size_t *new_vl, size_t vl) { @@ -1324,8 +1829,13 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: 
[[TMP3:%.*]] = extractvalue { <vscale x 8 x i64>, i64 } [[TMP1]], 1
-// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null
+// CHECK-RV64-NEXT:    br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]]
+// CHECK-RV64:       newvl_store:
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x i64>, i64 } [[TMP1]], 1
+// CHECK-RV64-NEXT:    store i64 [[TMP4]], i64* [[NEW_VL]], align 8
+// CHECK-RV64-NEXT:    br label [[NEWVL_END]]
+// CHECK-RV64:       newvl_end:
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP2]]
 //
 vuint64m8_t test_vle64ff_v_u64m8_m (vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, size_t *new_vl, size_t vl) {
@@ -1337,8 +1847,13 @@
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call { <vscale x 1 x double>, i64 } @llvm.riscv.vleff.mask.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double>* [[TMP0]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x double>, i64 } [[TMP1]], 0
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x double>, i64 } [[TMP1]], 1
-// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null
+// CHECK-RV64-NEXT:    br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]]
+// CHECK-RV64:       newvl_store:
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x double>, i64 } [[TMP1]], 1
+// CHECK-RV64-NEXT:    store i64 [[TMP4]], i64* [[NEW_VL]], align 8
+// CHECK-RV64-NEXT:    br label [[NEWVL_END]]
+// CHECK-RV64:       newvl_end:
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP2]]
 //
 vfloat64m1_t test_vle64ff_v_f64m1_m (vbool64_t mask, vfloat64m1_t maskedoff, const double *base, size_t *new_vl, size_t vl) {
@@ -1350,8 +1865,13 @@
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call { <vscale x 2 x double>, i64 } @llvm.riscv.vleff.mask.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double>* [[TMP0]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x double>, i64 } [[TMP1]], 0
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x double>, i64 } [[TMP1]], 1
-// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null
+// CHECK-RV64-NEXT:    br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]]
+// CHECK-RV64:       newvl_store:
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x double>, i64 } [[TMP1]], 1
+// CHECK-RV64-NEXT:    store i64 [[TMP4]], i64* [[NEW_VL]], align 8
+// CHECK-RV64-NEXT:    br label [[NEWVL_END]]
+// CHECK-RV64:       newvl_end:
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP2]]
 //
 vfloat64m2_t test_vle64ff_v_f64m2_m (vbool32_t mask, vfloat64m2_t maskedoff, const double *base, size_t *new_vl, size_t vl) {
@@ -1363,8 +1883,13 @@
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call { <vscale x 4 x double>, i64 } @llvm.riscv.vleff.mask.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double>* [[TMP0]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x double>, i64 } [[TMP1]], 0
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x double>, i64 } [[TMP1]], 1
-// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null
+// CHECK-RV64-NEXT:    br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]]
+// CHECK-RV64:       newvl_store:
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x double>, i64 } [[TMP1]], 1
+// CHECK-RV64-NEXT:    store i64 [[TMP4]], i64* [[NEW_VL]], align 8
+// CHECK-RV64-NEXT:    br label [[NEWVL_END]]
+// CHECK-RV64:       newvl_end:
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP2]]
 //
 vfloat64m4_t test_vle64ff_v_f64m4_m (vbool16_t mask, vfloat64m4_t maskedoff, const double *base, size_t *new_vl, size_t vl) {
@@ -1376,8 +1901,13 @@
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call { <vscale x 8 x double>, i64 } @llvm.riscv.vleff.mask.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double>* [[TMP0]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x double>, i64 } [[TMP1]], 0
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x double>, i64 } [[TMP1]], 1
-// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null
+// CHECK-RV64-NEXT:    br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]]
+// CHECK-RV64:       newvl_store:
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x double>, i64 } [[TMP1]], 1
+// CHECK-RV64-NEXT:    store i64 [[TMP4]], i64* [[NEW_VL]], align 8
+// CHECK-RV64-NEXT:    br label [[NEWVL_END]]
+// CHECK-RV64:       newvl_end:
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP2]]
 //
 vfloat64m8_t test_vle64ff_v_f64m8_m (vbool8_t mask, vfloat64m8_t maskedoff, const double *base, size_t *new_vl, size_t vl) {
@@ -1389,8 +1919,13 @@
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 1 x half>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call { <vscale x 1 x half>, i64 } @llvm.riscv.vleff.nxv1f16.i64(<vscale x 1 x half> undef, <vscale x 1 x half>* [[TMP0]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, i64 } [[TMP1]], 0
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, i64 } [[TMP1]], 1
-// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null
+// CHECK-RV64-NEXT:    br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]]
+// CHECK-RV64:       newvl_store:
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, i64 } [[TMP1]], 1
+// CHECK-RV64-NEXT:    store i64 [[TMP4]], i64* [[NEW_VL]], align 8
+// CHECK-RV64-NEXT:    br label [[NEWVL_END]]
+// CHECK-RV64:       newvl_end:
 // CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP2]]
 //
 vfloat16mf4_t test_vle16ff_v_f16mf4 (const _Float16 *base, size_t *new_vl, size_t vl) {
@@ -1402,8 +1937,13 @@
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 2 x half>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call { <vscale x 2 x half>, i64 } @llvm.riscv.vleff.nxv2f16.i64(<vscale x 2 x half> undef, <vscale x 2 x half>* [[TMP0]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, i64 } [[TMP1]], 0
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, i64 } [[TMP1]], 1
-// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null
+// CHECK-RV64-NEXT:    br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]]
+// CHECK-RV64:       newvl_store:
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, i64 } [[TMP1]], 1
+// CHECK-RV64-NEXT:    store i64 [[TMP4]], i64* [[NEW_VL]], align 8
+// CHECK-RV64-NEXT:    br label [[NEWVL_END]]
+// CHECK-RV64:       newvl_end:
 // CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP2]]
 //
 vfloat16mf2_t test_vle16ff_v_f16mf2 (const _Float16 *base, size_t *new_vl, size_t vl) {
@@ -1415,8 +1955,13 @@
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 4 x half>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call { <vscale x 4 x half>, i64 } @llvm.riscv.vleff.nxv4f16.i64(<vscale x 4 x half> undef, <vscale x 4 x half>* [[TMP0]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, i64 } [[TMP1]], 0
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, i64 } [[TMP1]], 1
-// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null
+// CHECK-RV64-NEXT:    br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]]
+// CHECK-RV64:       newvl_store:
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x half>, i64 } [[TMP1]], 1
+// CHECK-RV64-NEXT:    store i64 [[TMP4]], i64* [[NEW_VL]], align 8
+// CHECK-RV64-NEXT:    br label [[NEWVL_END]]
+// CHECK-RV64:       newvl_end:
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP2]]
 //
 vfloat16m1_t test_vle16ff_v_f16m1 (const _Float16 *base, size_t *new_vl, size_t vl) {
@@ -1428,8 +1973,13 @@
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 8 x half>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call { <vscale x 8 x half>, i64 } @llvm.riscv.vleff.nxv8f16.i64(<vscale x 8 x half> undef, <vscale x 8 x half>* [[TMP0]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x half>, i64 } [[TMP1]], 0
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x half>, i64 } [[TMP1]], 1
-// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null
+// CHECK-RV64-NEXT:    br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]]
+// CHECK-RV64:       newvl_store:
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x half>, i64 } [[TMP1]], 1
+// CHECK-RV64-NEXT:    store i64 [[TMP4]], i64* [[NEW_VL]], align 8
+// CHECK-RV64-NEXT:    br label [[NEWVL_END]]
+// CHECK-RV64:       newvl_end:
 // CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP2]]
 //
 vfloat16m2_t test_vle16ff_v_f16m2 (const _Float16 *base, size_t *new_vl, size_t vl) {
@@ -1441,8 +1991,13 @@
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 16 x half>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call { <vscale x 16 x half>, i64 } @llvm.riscv.vleff.nxv16f16.i64(<vscale x 16 x half> undef, <vscale x 16 x half>* [[TMP0]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 16 x half>, i64 } [[TMP1]], 0
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 16 x half>, i64 } [[TMP1]], 1
-// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null
+// CHECK-RV64-NEXT:    br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]]
+// CHECK-RV64:       newvl_store:
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 16 x half>, i64 } [[TMP1]], 1
+// CHECK-RV64-NEXT:    store i64 [[TMP4]], i64* [[NEW_VL]], align 8
+// CHECK-RV64-NEXT:    br label [[NEWVL_END]]
+// CHECK-RV64:       newvl_end:
 // CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP2]]
 //
 vfloat16m4_t test_vle16ff_v_f16m4 (const _Float16 *base, size_t *new_vl, size_t vl) {
@@ -1454,8 +2009,13 @@
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 32 x half>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call { <vscale x 32 x half>, i64 } @llvm.riscv.vleff.nxv32f16.i64(<vscale x 32 x half> undef, <vscale x 32 x half>* [[TMP0]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 32 x half>, i64 } [[TMP1]], 0
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 32 x half>, i64 } [[TMP1]], 1
-// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null
+// CHECK-RV64-NEXT:    br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]]
+// CHECK-RV64:       newvl_store:
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 32 x half>, i64 } [[TMP1]], 1
+// CHECK-RV64-NEXT:    store i64 [[TMP4]], i64* [[NEW_VL]], align 8
+// CHECK-RV64-NEXT:    br label [[NEWVL_END]]
+// CHECK-RV64:       newvl_end:
 // CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP2]]
 //
 vfloat16m8_t test_vle16ff_v_f16m8 (const _Float16 *base, size_t *new_vl, size_t vl) {
@@ -1467,8 +2027,13 @@
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 1 x half>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call { <vscale x 1 x half>, i64 } @llvm.riscv.vleff.mask.nxv1f16.i64(<vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half>* [[TMP0]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, i64 } [[TMP1]], 0
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, i64 } [[TMP1]], 1
-// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null
+// CHECK-RV64-NEXT:    br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]]
+// CHECK-RV64:       newvl_store:
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, i64 } [[TMP1]], 1
+// CHECK-RV64-NEXT:    store i64 [[TMP4]], i64* [[NEW_VL]], align 8
+// CHECK-RV64-NEXT:    br label [[NEWVL_END]]
+// CHECK-RV64:       newvl_end:
 // CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP2]]
 //
 vfloat16mf4_t test_vle16ff_v_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, const _Float16 *base, size_t *new_vl, size_t vl) {
@@ -1480,8 +2045,13 @@
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 2 x half>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call { <vscale x 2 x half>, i64 } @llvm.riscv.vleff.mask.nxv2f16.i64(<vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half>* [[TMP0]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, i64 } [[TMP1]], 0
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, i64 } [[TMP1]], 1
-// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null
+// CHECK-RV64-NEXT:    br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]]
+// CHECK-RV64:       newvl_store:
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, i64 } [[TMP1]], 1
+// CHECK-RV64-NEXT:    store i64 [[TMP4]], i64* [[NEW_VL]], align 8
+// CHECK-RV64-NEXT:    br label [[NEWVL_END]]
+// CHECK-RV64:       newvl_end:
 // CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP2]]
 //
 vfloat16mf2_t test_vle16ff_v_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, const _Float16 *base, size_t *new_vl, size_t vl) {
@@ -1493,8 +2063,13 @@
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 4 x half>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call { <vscale x 4 x half>, i64 } @llvm.riscv.vleff.mask.nxv4f16.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half>* [[TMP0]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, i64 } [[TMP1]], 0
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, i64 } [[TMP1]], 1
-// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null
+// CHECK-RV64-NEXT:    br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]]
+// CHECK-RV64:       newvl_store:
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x half>, i64 } [[TMP1]], 1
+// CHECK-RV64-NEXT:    store i64 [[TMP4]], i64* [[NEW_VL]], align 8
+// CHECK-RV64-NEXT:    br label [[NEWVL_END]]
+// CHECK-RV64:       newvl_end:
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP2]]
 //
 vfloat16m1_t test_vle16ff_v_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, const _Float16 *base, size_t *new_vl, size_t vl) {
@@ -1506,8 +2081,13 @@
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 8 x half>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call { <vscale x 8 x half>, i64 } @llvm.riscv.vleff.mask.nxv8f16.i64(<vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half>* [[TMP0]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x half>, i64 } [[TMP1]], 0
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x half>, i64 } [[TMP1]], 1
-// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null
+// CHECK-RV64-NEXT:    br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]]
+// CHECK-RV64:       newvl_store:
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x half>, i64 } [[TMP1]], 1
+// CHECK-RV64-NEXT:    store i64 [[TMP4]], i64* [[NEW_VL]], align 8
+// CHECK-RV64-NEXT:    br label [[NEWVL_END]]
+// CHECK-RV64:       newvl_end:
 // CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP2]]
 //
 vfloat16m2_t test_vle16ff_v_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, const _Float16 *base, size_t *new_vl, size_t vl) {
@@ -1519,8 +2099,13 @@
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 16 x half>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call { <vscale x 16 x half>, i64 } @llvm.riscv.vleff.mask.nxv16f16.i64(<vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half>* [[TMP0]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 16 x half>, i64 } [[TMP1]], 0
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 16 x half>, i64 } [[TMP1]], 1
-// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null
+// CHECK-RV64-NEXT:    br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]]
+// CHECK-RV64:       newvl_store:
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 16 x half>, i64 } [[TMP1]], 1
+// CHECK-RV64-NEXT:    store i64 [[TMP4]], i64* [[NEW_VL]], align 8
+// CHECK-RV64-NEXT:    br label [[NEWVL_END]]
+// CHECK-RV64:       newvl_end:
 // CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP2]]
 //
 vfloat16m4_t test_vle16ff_v_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, const _Float16 *base, size_t *new_vl, size_t vl) {
@@ -1532,8 +2117,13 @@
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 32 x half>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call { <vscale x 32 x half>, i64 } @llvm.riscv.vleff.mask.nxv32f16.i64(<vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half>* [[TMP0]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 32 x half>, i64 } [[TMP1]], 0
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 32 x half>, i64 } [[TMP1]], 1
-// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = icmp ne i64* [[NEW_VL:%.*]], null
+// CHECK-RV64-NEXT:    br i1 [[TMP3]], label [[NEWVL_STORE:%.*]], label [[NEWVL_END:%.*]]
+// CHECK-RV64:       newvl_store:
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 32 x half>, i64 } [[TMP1]], 1
+// CHECK-RV64-NEXT:    store i64 [[TMP4]], i64* [[NEW_VL]], align 8
+// CHECK-RV64-NEXT:    br label [[NEWVL_END]]
+// CHECK-RV64:       newvl_end:
 // CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP2]]
 //
 vfloat16m8_t test_vle16ff_v_f16m8_m (vbool2_t mask, vfloat16m8_t maskedoff, const _Float16 *base, size_t *new_vl, size_t vl) {