diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vloxei.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vloxei.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vloxei.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vloxei.c
@@ -1,6 +1,7 @@
 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
 // REQUIRES: riscv-registered-target
-// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +v \
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d \
+// RUN:   -target-feature +v -target-feature +zfh -target-feature +experimental-zvfh \
 // RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s

 #include <riscv_vector.h>
@@ -3826,3 +3827,422 @@
   return vloxei64(mask, maskedoff, base, bindex, vl);
 }

+// CHECK-RV64-LABEL: @test_vloxei8_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 1 x half>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x half> @llvm.riscv.vloxei.nxv1f16.nxv1i8.i64(<vscale x 1 x half> undef, <vscale x 1 x half>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP1]]
+//
+vfloat16mf4_t test_vloxei8_v_f16mf4(const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
+  return vloxei8(base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei8_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 2 x half>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x half> @llvm.riscv.vloxei.nxv2f16.nxv2i8.i64(<vscale x 2 x half> undef, <vscale x 2 x half>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP1]]
+//
+vfloat16mf2_t test_vloxei8_v_f16mf2(const _Float16 *base, vuint8mf4_t bindex, size_t vl) {
+  return vloxei8(base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei8_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 4 x half>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x half> @llvm.riscv.vloxei.nxv4f16.nxv4i8.i64(<vscale x 4 x half> undef, <vscale x 4 x half>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP1]]
+//
+vfloat16m1_t test_vloxei8_v_f16m1(const _Float16 *base, vuint8mf2_t bindex, size_t vl) {
+  return vloxei8(base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei8_v_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 8 x half>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x half> @llvm.riscv.vloxei.nxv8f16.nxv8i8.i64(<vscale x 8 x half> undef, <vscale x 8 x half>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP1]]
+//
+vfloat16m2_t test_vloxei8_v_f16m2(const _Float16 *base, vuint8m1_t bindex, size_t vl) {
+  return vloxei8(base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei8_v_f16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 16 x half>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x half> @llvm.riscv.vloxei.nxv16f16.nxv16i8.i64(<vscale x 16 x half> undef, <vscale x 16 x half>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP1]]
+//
+vfloat16m4_t test_vloxei8_v_f16m4(const _Float16 *base, vuint8m2_t bindex, size_t vl) {
+  return vloxei8(base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei8_v_f16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 32 x half>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x half> @llvm.riscv.vloxei.nxv32f16.nxv32i8.i64(<vscale x 32 x half> undef, <vscale x 32 x half>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP1]]
+//
+vfloat16m8_t test_vloxei8_v_f16m8(const _Float16 *base, vuint8m4_t bindex, size_t vl) {
+  return vloxei8(base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei16_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 1 x half>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x half> @llvm.riscv.vloxei.nxv1f16.nxv1i16.i64(<vscale x 1 x half> undef, <vscale x 1 x half>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP1]]
+//
+vfloat16mf4_t test_vloxei16_v_f16mf4(const _Float16 *base, vuint16mf4_t bindex, size_t vl) {
+  return vloxei16(base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei16_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 2 x half>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x half> @llvm.riscv.vloxei.nxv2f16.nxv2i16.i64(<vscale x 2 x half> undef, <vscale x 2 x half>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP1]]
+//
+vfloat16mf2_t test_vloxei16_v_f16mf2(const _Float16 *base, vuint16mf2_t bindex, size_t vl) {
+  return vloxei16(base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei16_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 4 x half>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x half> @llvm.riscv.vloxei.nxv4f16.nxv4i16.i64(<vscale x 4 x half> undef, <vscale x 4 x half>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP1]]
+//
+vfloat16m1_t test_vloxei16_v_f16m1(const _Float16 *base, vuint16m1_t bindex, size_t vl) {
+  return vloxei16(base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei16_v_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 8 x half>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x half> @llvm.riscv.vloxei.nxv8f16.nxv8i16.i64(<vscale x 8 x half> undef, <vscale x 8 x half>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP1]]
+//
+vfloat16m2_t test_vloxei16_v_f16m2(const _Float16 *base, vuint16m2_t bindex, size_t vl) {
+  return vloxei16(base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei16_v_f16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 16 x half>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x half> @llvm.riscv.vloxei.nxv16f16.nxv16i16.i64(<vscale x 16 x half> undef, <vscale x 16 x half>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP1]]
+//
+vfloat16m4_t test_vloxei16_v_f16m4(const _Float16 *base, vuint16m4_t bindex, size_t vl) {
+  return vloxei16(base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei16_v_f16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 32 x half>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x half> @llvm.riscv.vloxei.nxv32f16.nxv32i16.i64(<vscale x 32 x half> undef, <vscale x 32 x half>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP1]]
+//
+vfloat16m8_t test_vloxei16_v_f16m8(const _Float16 *base, vuint16m8_t bindex, size_t vl) {
+  return vloxei16(base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei32_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 1 x half>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x half> @llvm.riscv.vloxei.nxv1f16.nxv1i32.i64(<vscale x 1 x half> undef, <vscale x 1 x half>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP1]]
+//
+vfloat16mf4_t test_vloxei32_v_f16mf4(const _Float16 *base, vuint32mf2_t bindex, size_t vl) {
+  return vloxei32(base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei32_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 2 x half>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x half> @llvm.riscv.vloxei.nxv2f16.nxv2i32.i64(<vscale x 2 x half> undef, <vscale x 2 x half>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP1]]
+//
+vfloat16mf2_t test_vloxei32_v_f16mf2(const _Float16 *base, vuint32m1_t bindex, size_t vl) {
+  return vloxei32(base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei32_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 4 x half>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x half> @llvm.riscv.vloxei.nxv4f16.nxv4i32.i64(<vscale x 4 x half> undef, <vscale x 4 x half>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP1]]
+//
+vfloat16m1_t test_vloxei32_v_f16m1(const _Float16 *base, vuint32m2_t bindex, size_t vl) {
+  return vloxei32(base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei32_v_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 8 x half>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x half> @llvm.riscv.vloxei.nxv8f16.nxv8i32.i64(<vscale x 8 x half> undef, <vscale x 8 x half>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP1]]
+//
+vfloat16m2_t test_vloxei32_v_f16m2(const _Float16 *base, vuint32m4_t bindex, size_t vl) {
+  return vloxei32(base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei32_v_f16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 16 x half>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x half> @llvm.riscv.vloxei.nxv16f16.nxv16i32.i64(<vscale x 16 x half> undef, <vscale x 16 x half>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP1]]
+//
+vfloat16m4_t test_vloxei32_v_f16m4(const _Float16 *base, vuint32m8_t bindex, size_t vl) {
+  return vloxei32(base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei64_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 1 x half>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x half> @llvm.riscv.vloxei.nxv1f16.nxv1i64.i64(<vscale x 1 x half> undef, <vscale x 1 x half>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP1]]
+//
+vfloat16mf4_t test_vloxei64_v_f16mf4(const _Float16 *base, vuint64m1_t bindex, size_t vl) {
+  return vloxei64(base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei64_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 2 x half>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x half> @llvm.riscv.vloxei.nxv2f16.nxv2i64.i64(<vscale x 2 x half> undef, <vscale x 2 x half>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP1]]
+//
+vfloat16mf2_t test_vloxei64_v_f16mf2(const _Float16 *base, vuint64m2_t bindex, size_t vl) {
+  return vloxei64(base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei64_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 4 x half>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x half> @llvm.riscv.vloxei.nxv4f16.nxv4i64.i64(<vscale x 4 x half> undef, <vscale x 4 x half>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP1]]
+//
+vfloat16m1_t test_vloxei64_v_f16m1(const _Float16 *base, vuint64m4_t bindex, size_t vl) {
+  return vloxei64(base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei64_v_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 8 x half>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x half> @llvm.riscv.vloxei.nxv8f16.nxv8i64.i64(<vscale x 8 x half> undef, <vscale x 8 x half>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP1]]
+//
+vfloat16m2_t test_vloxei64_v_f16m2(const _Float16 *base, vuint64m8_t bindex, size_t vl) {
+  return vloxei64(base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei8_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 1 x half>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x half> @llvm.riscv.vloxei.mask.nxv1f16.nxv1i8.i64(<vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP1]]
+//
+vfloat16mf4_t test_vloxei8_v_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, const _Float16 *base,
vuint8mf8_t bindex, size_t vl) { + return vloxei8(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxei8_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2f16.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16mf2_t test_vloxei8_v_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { + return vloxei8(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxei8_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4f16.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16m1_t test_vloxei8_v_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { + return vloxei8(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxei8_v_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8f16.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16m2_t test_vloxei8_v_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, const _Float16 *base, vuint8m1_t bindex, size_t vl) { + return vloxei8(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxei8_v_f16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16f16.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16m4_t test_vloxei8_v_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, const _Float16 *base, vuint8m2_t bindex, size_t vl) { + return vloxei8(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxei8_v_f16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv32f16.nxv32i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16m8_t test_vloxei8_v_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, const _Float16 *base, vuint8m4_t bindex, size_t vl) { + return vloxei8(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxei16_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1f16.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16mf4_t test_vloxei16_v_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { + return vloxei16(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxei16_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call 
@llvm.riscv.vloxei.mask.nxv2f16.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16mf2_t test_vloxei16_v_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { + return vloxei16(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxei16_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4f16.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16m1_t test_vloxei16_v_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, const _Float16 *base, vuint16m1_t bindex, size_t vl) { + return vloxei16(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxei16_v_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8f16.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16m2_t test_vloxei16_v_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, const _Float16 *base, vuint16m2_t bindex, size_t vl) { + return vloxei16(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxei16_v_f16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16f16.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16m4_t test_vloxei16_v_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, const _Float16 *base, vuint16m4_t bindex, size_t vl) { + return vloxei16(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxei16_v_f16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv32f16.nxv32i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16m8_t test_vloxei16_v_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, const _Float16 *base, vuint16m8_t bindex, size_t vl) { + return vloxei16(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxei32_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1f16.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16mf4_t test_vloxei32_v_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { + return vloxei32(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxei32_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2f16.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16mf2_t test_vloxei32_v_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, const _Float16 *base, 
vuint32m1_t bindex, size_t vl) { + return vloxei32(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxei32_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4f16.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16m1_t test_vloxei32_v_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, const _Float16 *base, vuint32m2_t bindex, size_t vl) { + return vloxei32(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxei32_v_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8f16.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16m2_t test_vloxei32_v_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, const _Float16 *base, vuint32m4_t bindex, size_t vl) { + return vloxei32(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxei32_v_f16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16f16.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16m4_t test_vloxei32_v_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, const _Float16 *base, vuint32m8_t bindex, size_t vl) { + return vloxei32(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxei64_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1f16.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16mf4_t test_vloxei64_v_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, const _Float16 *base, vuint64m1_t bindex, size_t vl) { + return vloxei64(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxei64_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2f16.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16mf2_t test_vloxei64_v_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, const _Float16 *base, vuint64m2_t bindex, size_t vl) { + return vloxei64(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxei64_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4f16.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16m1_t test_vloxei64_v_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, const _Float16 *base, vuint64m4_t bindex, size_t vl) { + return vloxei64(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxei64_v_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call 
@llvm.riscv.vloxei.mask.nxv8f16.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16m2_t test_vloxei64_v_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, const _Float16 *base, vuint64m8_t bindex, size_t vl) { + return vloxei64(mask, maskedoff, base, bindex, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vloxseg_mask.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vloxseg_mask.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vloxseg_mask.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vloxseg_mask.c @@ -1,7 +1,7 @@ // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d \ -// RUN: -target-feature +v \ +// RUN: -target-feature +zfh -target-feature +experimental-zvfh -target-feature +v \ // RUN: -disable-O0-optnone -emit-llvm %s \ // RUN: -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s @@ -7478,3 +7478,1505 @@ void test_vloxseg2ei64_v_f64m4_m (vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, vuint64m4_t bindex, size_t vl) { return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } + +// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4f16.nxv4i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei8_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg2ei8(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv4f16.nxv4i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei8_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg3ei8(v0, v1, v2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv4f16.nxv4i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , 
, } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei8_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg4ei8(v0, v1, v2, v3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv4f16.nxv4i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei8_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv4f16.nxv4i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei8_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv4f16.nxv4i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store 
[[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei8_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv4f16.nxv4i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei8_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv8f16.nxv8i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei8_v_f16m2(vfloat16m2_t *v0, vfloat16m2_t *v1, const _Float16 *base, vuint8m1_t bindex, size_t vl) { + return vloxseg2ei8(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv8f16.nxv8i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store 
[[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei8_v_f16m2(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, const _Float16 *base, vuint8m1_t bindex, size_t vl) { + return vloxseg3ei8(v0, v1, v2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv8f16.nxv8i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei8_v_f16m2(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, const _Float16 *base, vuint8m1_t bindex, size_t vl) { + return vloxseg4ei8(v0, v1, v2, v3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv16f16.nxv16i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei8_v_f16m4(vfloat16m4_t *v0, vfloat16m4_t *v1, const _Float16 *base, vuint8m2_t bindex, size_t vl) { + return vloxseg2ei8(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4f16.nxv4i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei16_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, const _Float16 *base, vuint16m1_t bindex, size_t vl) { + return vloxseg2ei16(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv4f16.nxv4i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei16_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, const _Float16 *base, vuint16m1_t bindex, size_t vl) { + return vloxseg3ei16(v0, v1, v2, 
base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv4f16.nxv4i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei16_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, const _Float16 *base, vuint16m1_t bindex, size_t vl) { + return vloxseg4ei16(v0, v1, v2, v3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv4f16.nxv4i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei16_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, const _Float16 *base, vuint16m1_t bindex, size_t vl) { + return vloxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv4f16.nxv4i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei16_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, const _Float16 *base, vuint16m1_t bindex, size_t vl) { + return vloxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: 
@test_vloxseg7ei16_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv4f16.nxv4i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei16_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, const _Float16 *base, vuint16m1_t bindex, size_t vl) { + return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv4f16.nxv4i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei16_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, const _Float16 *base, vuint16m1_t bindex, size_t vl) { + return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv8f16.nxv8i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } 
[[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei16_v_f16m2(vfloat16m2_t *v0, vfloat16m2_t *v1, const _Float16 *base, vuint16m2_t bindex, size_t vl) { + return vloxseg2ei16(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv8f16.nxv8i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei16_v_f16m2(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, const _Float16 *base, vuint16m2_t bindex, size_t vl) { + return vloxseg3ei16(v0, v1, v2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv8f16.nxv8i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei16_v_f16m2(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, const _Float16 *base, vuint16m2_t bindex, size_t vl) { + return vloxseg4ei16(v0, v1, v2, v3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv16f16.nxv16i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei16_v_f16m4(vfloat16m4_t *v0, vfloat16m4_t *v1, const _Float16 *base, vuint16m4_t bindex, size_t vl) { + return vloxseg2ei16(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4f16.nxv4i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei32_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, const _Float16 *base, vuint32m2_t bindex, size_t vl) { + return vloxseg2ei32(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f16m1( +// CHECK-RV64-NEXT: 
entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv4f16.nxv4i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei32_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, const _Float16 *base, vuint32m2_t bindex, size_t vl) { + return vloxseg3ei32(v0, v1, v2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv4f16.nxv4i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei32_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, const _Float16 *base, vuint32m2_t bindex, size_t vl) { + return vloxseg4ei32(v0, v1, v2, v3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv4f16.nxv4i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei32_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, const _Float16 *base, vuint32m2_t bindex, size_t vl) { + return vloxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv4f16.nxv4i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei32_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, const _Float16 *base, vuint32m2_t bindex, size_t vl) { + return vloxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv4f16.nxv4i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei32_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, const _Float16 *base, vuint32m2_t bindex, size_t vl) { + return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv4f16.nxv4i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// 
CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei32_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, const _Float16 *base, vuint32m2_t bindex, size_t vl) { + return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv8f16.nxv8i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei32_v_f16m2(vfloat16m2_t *v0, vfloat16m2_t *v1, const _Float16 *base, vuint32m4_t bindex, size_t vl) { + return vloxseg2ei32(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv8f16.nxv8i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei32_v_f16m2(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, const _Float16 *base, vuint32m4_t bindex, size_t vl) { + return vloxseg3ei32(v0, v1, v2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv8f16.nxv8i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei32_v_f16m2(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, const _Float16 *base, vuint32m4_t bindex, size_t vl) { + return vloxseg4ei32(v0, v1, v2, v3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv16f16.nxv16i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei32_v_f16m4(vfloat16m4_t *v0, vfloat16m4_t *v1, const _Float16 *base, vuint32m8_t bindex, size_t vl) { + return vloxseg2ei32(v0, v1, base, bindex, vl); +} + +// 
CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4f16.nxv4i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei64_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, const _Float16 *base, vuint64m4_t bindex, size_t vl) { + return vloxseg2ei64(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv4f16.nxv4i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei64_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, const _Float16 *base, vuint64m4_t bindex, size_t vl) { + return vloxseg3ei64(v0, v1, v2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv4f16.nxv4i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei64_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, const _Float16 *base, vuint64m4_t bindex, size_t vl) { + return vloxseg4ei64(v0, v1, v2, v3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv4f16.nxv4i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei64_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, 
const _Float16 *base, vuint64m4_t bindex, size_t vl) { + return vloxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv4f16.nxv4i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei64_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, const _Float16 *base, vuint64m4_t bindex, size_t vl) { + return vloxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv4f16.nxv4i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei64_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, const _Float16 *base, vuint64m4_t bindex, size_t vl) { + return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv4f16.nxv4i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// 
CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei64_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, const _Float16 *base, vuint64m4_t bindex, size_t vl) { + return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv8f16.nxv8i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei64_v_f16m2(vfloat16m2_t *v0, vfloat16m2_t *v1, const _Float16 *base, vuint64m8_t bindex, size_t vl) { + return vloxseg2ei64(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv8f16.nxv8i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei64_v_f16m2(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, const _Float16 *base, vuint64m8_t bindex, size_t vl) { + return vloxseg3ei64(v0, v1, v2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv8f16.nxv8i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei64_v_f16m2(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, const _Float16 *base, vuint64m8_t bindex, size_t vl) { + return 
vloxseg4ei64(v0, v1, v2, v3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4f16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei8_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4f16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei8_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4f16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei8_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4f16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei8_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4f16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei8_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4f16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * 
[[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei8_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4f16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei8_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f16m2_m( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8f16.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei8_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, vuint8m1_t bindex, size_t vl) { + return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8f16.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei8_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, vuint8m1_t bindex, size_t vl) { + return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv8f16.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei8_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, vuint8m1_t bindex, size_t vl) { + return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv16f16.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: 
[[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei8_v_f16m4_m(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const _Float16 *base, vuint8m2_t bindex, size_t vl) { + return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4f16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, vuint16m1_t bindex, size_t vl) { + return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4f16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, vuint16m1_t bindex, size_t vl) { + return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4f16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, vuint16m1_t bindex, size_t vl) { + return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, 
maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4f16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, vuint16m1_t bindex, size_t vl) { + return vloxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4f16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, vuint16m1_t bindex, size_t vl) { + return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4f16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], 
[[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, vuint16m1_t bindex, size_t vl) { + return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4f16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, 
vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, vuint16m1_t bindex, size_t vl) { + return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8f16.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei16_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, vuint16m2_t bindex, size_t vl) { + return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8f16.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei16_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, vuint16m2_t bindex, size_t vl) { + return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv8f16.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei16_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, vuint16m2_t bindex, size_t vl) { + return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f16m4_m( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv16f16.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei16_v_f16m4_m(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const _Float16 *base, vuint16m4_t bindex, size_t vl) { + return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4f16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei32_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, vuint32m2_t bindex, size_t vl) { + return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4f16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei32_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, vuint32m2_t bindex, size_t vl) { + return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4f16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 
+// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei32_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, vuint32m2_t bindex, size_t vl) { + return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4f16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei32_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, vuint32m2_t bindex, size_t vl) { + return vloxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4f16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei32_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, vuint32m2_t bindex, size_t vl) { + return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, 
maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4f16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei32_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, vuint32m2_t bindex, size_t vl) { + return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4f16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * 
[[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei32_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, vuint32m2_t bindex, size_t vl) { + return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8f16.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei32_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, vuint32m4_t bindex, size_t vl) { + return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8f16.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei32_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, vuint32m4_t bindex, size_t vl) { + return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv8f16.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei32_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, 
vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, vuint32m4_t bindex, size_t vl) { + return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv16f16.nxv16i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei32_v_f16m4_m(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const _Float16 *base, vuint32m8_t bindex, size_t vl) { + return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4f16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei64_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, vuint64m4_t bindex, size_t vl) { + return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4f16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei64_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, vuint64m4_t bindex, size_t vl) { + return vloxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4f16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// 
CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei64_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, vuint64m4_t bindex, size_t vl) { + return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4f16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei64_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, vuint64m4_t bindex, size_t vl) { + return vloxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4f16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void 
test_vloxseg6ei64_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, vuint64m4_t bindex, size_t vl) { + return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4f16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei64_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, vuint64m4_t bindex, size_t vl) { + return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4f16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * 
[[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei64_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, vuint64m4_t bindex, size_t vl) { + return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8f16.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei64_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, vuint64m8_t bindex, size_t vl) { + return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8f16.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei64_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, vuint64m8_t bindex, size_t vl) { + return vloxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv8f16.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// 
CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei64_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, vuint64m8_t bindex, size_t vl) { + return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vlseg.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vlseg.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vlseg.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vlseg.c @@ -1,27 +1,12 @@ // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py // REQUIRES: riscv-registered-target -// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d \ -// RUN: -target-feature +v \ -// RUN: -disable-O0-optnone \ -// RUN: -fallow-half-arguments-and-returns -emit-llvm %s -o - \ -// RUN: | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s // RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d \ -// RUN: -target-feature +v \ -// RUN: -disable-O0-optnone \ -// RUN: -fallow-half-arguments-and-returns -emit-llvm %s -o - \ -// RUN: | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s +// RUN: -target-feature +zfh -target-feature +experimental-zvfh -target-feature +v \ +// RUN: -disable-O0-optnone -emit-llvm %s \ +// RUN: -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s #include -// CHECK-RV32-LABEL: @test_vlseg2e8_v_i8mf8_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg2e8_v_i8mf8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -35,17 +20,6 @@ return vlseg2e8(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg3e8_v_i8mf8_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg3e8_v_i8mf8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -61,19 +35,6 @@ return vlseg3e8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg4e8_v_i8mf8_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg4e8_v_i8mf8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -91,21 +52,6 @@ return vlseg4e8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg5e8_v_i8mf8_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg5e8_v_i8mf8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -125,23 +71,6 @@ return vlseg5e8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg6e8_v_i8mf8_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } 
[[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg6e8_v_i8mf8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -163,25 +92,6 @@ return vlseg6e8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg7e8_v_i8mf8_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg7e8_v_i8mf8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -205,27 +115,6 @@ return vlseg7e8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg8e8_v_i8mf8_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: 
[[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg8e8_v_i8mf8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -251,15 +140,6 @@ return vlseg8e8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg2e8_v_i8mf4_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg2e8_v_i8mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -273,17 +153,6 @@ return vlseg2e8(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg3e8_v_i8mf4_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg3e8_v_i8mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -299,19 +168,6 @@ return vlseg3e8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg4e8_v_i8mf4_m( -// 
CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg4e8_v_i8mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -329,21 +185,6 @@ return vlseg4e8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg5e8_v_i8mf4_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg5e8_v_i8mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -363,23 +204,6 @@ return vlseg5e8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg6e8_v_i8mf4_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], 
align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg6e8_v_i8mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -401,25 +225,6 @@ return vlseg6e8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg7e8_v_i8mf4_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg7e8_v_i8mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -443,27 +248,6 @@ return vlseg7e8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg8e8_v_i8mf4_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } 
[[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg8e8_v_i8mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -489,15 +273,6 @@ return vlseg8e8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg2e8_v_i8mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg2e8_v_i8mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -511,17 +286,6 @@ return vlseg2e8(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg3e8_v_i8mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv4i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg3e8_v_i8mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -537,19 +301,6 @@ return vlseg3e8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg4e8_v_i8mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv4i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// 
CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg4e8_v_i8mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -567,21 +318,6 @@ return vlseg4e8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg5e8_v_i8mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv4i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg5e8_v_i8mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -601,23 +337,6 @@ return vlseg5e8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg6e8_v_i8mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv4i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// // 
CHECK-RV64-LABEL: @test_vlseg6e8_v_i8mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -639,25 +358,6 @@ return vlseg6e8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg7e8_v_i8mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv4i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg7e8_v_i8mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -681,27 +381,6 @@ return vlseg7e8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg8e8_v_i8mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv4i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store 
[[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg8e8_v_i8mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -727,15 +406,6 @@ return vlseg8e8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg2e8_v_i8m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv8i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg2e8_v_i8m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -749,17 +419,6 @@ return vlseg2e8(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg3e8_v_i8m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv8i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg3e8_v_i8m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -775,19 +434,6 @@ return vlseg3e8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg4e8_v_i8m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv8i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// 
CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg4e8_v_i8m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -805,21 +451,6 @@ return vlseg4e8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg5e8_v_i8m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv8i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg5e8_v_i8m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -839,23 +470,6 @@ return vlseg5e8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg6e8_v_i8m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv8i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg6e8_v_i8m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 
0) @@ -877,25 +491,6 @@ return vlseg6e8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg7e8_v_i8m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv8i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg7e8_v_i8m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -919,27 +514,6 @@ return vlseg7e8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg8e8_v_i8m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv8i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// 
// CHECK-RV64-LABEL: @test_vlseg8e8_v_i8m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -965,15 +539,6 @@ return vlseg8e8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg2e8_v_i8m2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv16i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg2e8_v_i8m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -987,17 +552,6 @@ return vlseg2e8(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg3e8_v_i8m2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv16i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg3e8_v_i8m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -1013,19 +567,6 @@ return vlseg3e8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg4e8_v_i8m2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv16i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg4e8_v_i8m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], 
[[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -1043,15 +584,6 @@ return vlseg4e8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg2e8_v_i8m4_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv32i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg2e8_v_i8m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv32i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -1065,15 +597,6 @@ return vlseg2e8(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg2e16_v_i16mf4_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg2e16_v_i16mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -1087,17 +610,6 @@ return vlseg2e16(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg3e16_v_i16mf4_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg3e16_v_i16mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -1113,19 +625,6 @@ return vlseg3e16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg4e16_v_i16mf4_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = 
extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg4e16_v_i16mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -1143,21 +642,6 @@ return vlseg4e16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg5e16_v_i16mf4_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg5e16_v_i16mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -1177,23 +661,6 @@ return vlseg5e16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg6e16_v_i16mf4_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg6e16_v_i16mf4_m( // CHECK-RV64-NEXT: entry: // 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -1215,25 +682,6 @@ return vlseg6e16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg7e16_v_i16mf4_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg7e16_v_i16mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -1257,27 +705,6 @@ return vlseg7e16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg8e16_v_i16mf4_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV32-NEXT: 
[[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg8e16_v_i16mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -1303,15 +730,6 @@ return vlseg8e16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg2e16_v_i16mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg2e16_v_i16mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -1325,17 +743,6 @@ return vlseg2e16(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg3e16_v_i16mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg3e16_v_i16mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -1351,19 +758,6 @@ return vlseg3e16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg4e16_v_i16mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: 
[[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg4e16_v_i16mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -1381,21 +775,6 @@ return vlseg4e16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg5e16_v_i16mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg5e16_v_i16mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -1415,23 +794,6 @@ return vlseg5e16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg6e16_v_i16mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg6e16_v_i16mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 0) @@ -1453,25 +815,6 @@ return vlseg6e16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg7e16_v_i16mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg7e16_v_i16mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -1495,27 +838,6 @@ return vlseg7e16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg8e16_v_i16mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 
2 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg8e16_v_i16mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -1541,15 +863,6 @@ return vlseg8e16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg2e16_v_i16m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg2e16_v_i16m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -1563,17 +876,6 @@ return vlseg2e16(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg3e16_v_i16m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv4i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg3e16_v_i16m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -1589,19 +891,6 @@ return vlseg3e16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg4e16_v_i16m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv4i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg4e16_v_i16m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } 
@llvm.riscv.vlseg4.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -1619,21 +908,6 @@ return vlseg4e16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg5e16_v_i16m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv4i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg5e16_v_i16m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -1653,23 +927,6 @@ return vlseg5e16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg6e16_v_i16m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv4i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg6e16_v_i16m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -1691,25 +948,6 @@ return vlseg6e16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg7e16_v_i16m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: 
[[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv4i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg7e16_v_i16m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -1733,27 +971,6 @@ return vlseg7e16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg8e16_v_i16m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv4i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg8e16_v_i16m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], 
[[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -1779,15 +996,6 @@ return vlseg8e16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg2e16_v_i16m2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv8i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg2e16_v_i16m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -1801,17 +1009,6 @@ return vlseg2e16(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg3e16_v_i16m2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv8i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg3e16_v_i16m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -1827,19 +1024,6 @@ return vlseg3e16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg4e16_v_i16m2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv8i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg4e16_v_i16m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -1857,15 +1041,6 @@ return vlseg4e16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, 
maskedoff2, maskedoff3, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg2e16_v_i16m4_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv16i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg2e16_v_i16m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -1879,15 +1054,6 @@ return vlseg2e16(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg2e32_v_i32mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg2e32_v_i32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -1901,17 +1067,6 @@ return vlseg2e32(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg3e32_v_i32mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg3e32_v_i32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -1927,19 +1082,6 @@ return vlseg3e32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg4e32_v_i32mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: 
store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg4e32_v_i32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -1957,21 +1099,6 @@ return vlseg4e32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg5e32_v_i32mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg5e32_v_i32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -1991,23 +1118,6 @@ return vlseg5e32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg6e32_v_i32mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg6e32_v_i32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], 
[[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -2029,25 +1139,6 @@ return vlseg6e32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg7e32_v_i32mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg7e32_v_i32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -2071,27 +1162,6 @@ return vlseg7e32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg8e32_v_i32mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 
7 -// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg8e32_v_i32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -2117,15 +1187,6 @@ return vlseg8e32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg2e32_v_i32m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg2e32_v_i32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -2139,17 +1200,6 @@ return vlseg2e32(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg3e32_v_i32m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg3e32_v_i32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -2165,19 +1215,6 @@ return vlseg3e32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg4e32_v_i32m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg4e32_v_i32m1_m( // 
CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -2195,21 +1232,6 @@ return vlseg4e32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg5e32_v_i32m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg5e32_v_i32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -2229,23 +1251,6 @@ return vlseg5e32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg6e32_v_i32m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg6e32_v_i32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -2267,25 +1272,6 @@ return vlseg6e32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); } -// CHECK-RV32-LABEL: 
@test_vlseg7e32_v_i32m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg7e32_v_i32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -2309,27 +1295,6 @@ return vlseg7e32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg8e32_v_i32m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg8e32_v_i32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } 
@llvm.riscv.vlseg8.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -2355,15 +1320,6 @@ return vlseg8e32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg2e32_v_i32m2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg2e32_v_i32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -2377,17 +1333,6 @@ return vlseg2e32(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg3e32_v_i32m2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv4i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg3e32_v_i32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -2403,19 +1348,6 @@ return vlseg3e32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg4e32_v_i32m2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv4i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg4e32_v_i32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], 
i64 0) @@ -2433,15 +1365,6 @@ return vlseg4e32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg2e32_v_i32m4_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv8i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg2e32_v_i32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -2455,15 +1378,6 @@ return vlseg2e32(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg2e64_v_i64m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg2e64_v_i64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -2477,17 +1391,6 @@ return vlseg2e64(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg3e64_v_i64m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg3e64_v_i64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -2503,19 +1406,6 @@ return vlseg3e64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg4e64_v_i64m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// 
CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg4e64_v_i64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -2533,21 +1423,6 @@ return vlseg4e64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg5e64_v_i64m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg5e64_v_i64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -2567,23 +1442,6 @@ return vlseg5e64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg6e64_v_i64m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg6e64_v_i64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i64.i64( 
[[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -2605,25 +1463,6 @@ return vlseg6e64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg7e64_v_i64m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg7e64_v_i64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -2647,27 +1486,6 @@ return vlseg7e64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg8e64_v_i64m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * 
[[V6:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg8e64_v_i64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -2693,15 +1511,6 @@ return vlseg8e64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg2e64_v_i64m2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg2e64_v_i64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -2715,17 +1524,6 @@ return vlseg2e64(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg3e64_v_i64m2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg3e64_v_i64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -2741,19 +1539,6 @@ return vlseg3e64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg4e64_v_i64m2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 
-// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg4e64_v_i64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -2771,15 +1556,6 @@ return vlseg4e64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg2e64_v_i64m4_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg2e64_v_i64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -2793,15 +1569,6 @@ return vlseg2e64(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg2e8_v_u8mf8_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg2e8_v_u8mf8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -2815,17 +1582,6 @@ return vlseg2e8(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg3e8_v_u8mf8_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg3e8_v_u8mf8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -2841,19 +1597,6 @@ return vlseg3e8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg4e8_v_u8mf8_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* 
[[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg4e8_v_u8mf8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -2871,21 +1614,6 @@ return vlseg4e8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg5e8_v_u8mf8_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg5e8_v_u8mf8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -2905,23 +1633,6 @@ return vlseg5e8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg6e8_v_u8mf8_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { 
, , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg6e8_v_u8mf8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -2943,25 +1654,6 @@ return vlseg6e8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg7e8_v_u8mf8_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg7e8_v_u8mf8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -2985,27 +1677,6 @@ return vlseg7e8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg8e8_v_u8mf8_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * 
[[V4:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg8e8_v_u8mf8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -3031,15 +1702,6 @@ return vlseg8e8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg2e8_v_u8mf4_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg2e8_v_u8mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -3053,17 +1715,6 @@ return vlseg2e8(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg3e8_v_u8mf4_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg3e8_v_u8mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -3079,19 +1730,6 @@ return vlseg3e8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg4e8_v_u8mf4_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// 
CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg4e8_v_u8mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -3109,21 +1747,6 @@ return vlseg4e8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg5e8_v_u8mf4_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg5e8_v_u8mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -3143,23 +1766,6 @@ return vlseg5e8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg6e8_v_u8mf4_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg6e8_v_u8mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], 
[[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -3181,25 +1787,6 @@ return vlseg6e8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg7e8_v_u8mf4_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg7e8_v_u8mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -3223,27 +1810,6 @@ return vlseg7e8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg8e8_v_u8mf4_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// 
CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg8e8_v_u8mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -3269,15 +1835,6 @@ return vlseg8e8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg2e8_v_u8mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg2e8_v_u8mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -3291,17 +1848,6 @@ return vlseg2e8(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg3e8_v_u8mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv4i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg3e8_v_u8mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -3317,19 +1863,6 @@ return vlseg3e8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg4e8_v_u8mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv4i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// // 
CHECK-RV64-LABEL: @test_vlseg4e8_v_u8mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -3347,21 +1880,6 @@ return vlseg4e8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg5e8_v_u8mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv4i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg5e8_v_u8mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -3381,23 +1899,6 @@ return vlseg5e8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg6e8_v_u8mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv4i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg6e8_v_u8mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -3419,25 +1920,6 @@ return vlseg6e8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); 
} -// CHECK-RV32-LABEL: @test_vlseg7e8_v_u8mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv4i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg7e8_v_u8mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -3461,27 +1943,6 @@ return vlseg7e8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg8e8_v_u8mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv4i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg8e8_v_u8mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } 
@llvm.riscv.vlseg8.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -3507,15 +1968,6 @@ return vlseg8e8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg2e8_v_u8m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv8i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg2e8_v_u8m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -3529,17 +1981,6 @@ return vlseg2e8(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg3e8_v_u8m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv8i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg3e8_v_u8m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -3555,19 +1996,6 @@ return vlseg3e8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg4e8_v_u8m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv8i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg4e8_v_u8m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -3585,21 +2013,6 @@ 
return vlseg4e8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg5e8_v_u8m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv8i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg5e8_v_u8m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -3619,23 +2032,6 @@ return vlseg5e8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg6e8_v_u8m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv8i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg6e8_v_u8m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -3657,25 +2053,6 @@ return vlseg6e8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg7e8_v_u8m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv8i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* 
[[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg7e8_v_u8m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -3699,27 +2076,6 @@ return vlseg7e8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg8e8_v_u8m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv8i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg8e8_v_u8m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -3745,15 +2101,6 @@ return vlseg8e8(v0, v1, v2, v3, v4, v5, v6, v7, mask, 
maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg2e8_v_u8m2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv16i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg2e8_v_u8m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -3767,17 +2114,6 @@ return vlseg2e8(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg3e8_v_u8m2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv16i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg3e8_v_u8m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -3793,19 +2129,6 @@ return vlseg3e8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg4e8_v_u8m2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv16i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg4e8_v_u8m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -3823,15 +2146,6 @@ return vlseg4e8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg2e8_v_u8m4_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv32i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], 
[[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg2e8_v_u8m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv32i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -3845,15 +2159,6 @@ return vlseg2e8(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg2e16_v_u16mf4_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg2e16_v_u16mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -3867,17 +2172,6 @@ return vlseg2e16(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg3e16_v_u16mf4_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg3e16_v_u16mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -3893,19 +2187,6 @@ return vlseg3e16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg4e16_v_u16mf4_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg4e16_v_u16mf4_m( // 
CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -3923,21 +2204,6 @@ return vlseg4e16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg5e16_v_u16mf4_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg5e16_v_u16mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -3957,23 +2223,6 @@ return vlseg5e16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg6e16_v_u16mf4_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg6e16_v_u16mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -3995,25 +2244,6 @@ return vlseg6e16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); } -// CHECK-RV32-LABEL: 
@test_vlseg7e16_v_u16mf4_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg7e16_v_u16mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -4037,27 +2267,6 @@ return vlseg7e16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg8e16_v_u16mf4_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg8e16_v_u16mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } 
@llvm.riscv.vlseg8.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -4083,15 +2292,6 @@ return vlseg8e16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg2e16_v_u16mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg2e16_v_u16mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -4105,17 +2305,6 @@ return vlseg2e16(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg3e16_v_u16mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg3e16_v_u16mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -4131,19 +2320,6 @@ return vlseg3e16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg4e16_v_u16mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg4e16_v_u16mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 0) @@ -4161,21 +2337,6 @@ return vlseg4e16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg5e16_v_u16mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg5e16_v_u16mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -4195,23 +2356,6 @@ return vlseg5e16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg6e16_v_u16mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg6e16_v_u16mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -4233,25 +2377,6 @@ return vlseg6e16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg7e16_v_u16mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], 
[[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg7e16_v_u16mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -4275,27 +2400,6 @@ return vlseg7e16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg8e16_v_u16mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg8e16_v_u16mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) 
@@ -4321,15 +2425,6 @@ return vlseg8e16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg2e16_v_u16m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg2e16_v_u16m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -4343,17 +2438,6 @@ return vlseg2e16(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg3e16_v_u16m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv4i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg3e16_v_u16m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -4369,19 +2453,6 @@ return vlseg3e16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg4e16_v_u16m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv4i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg4e16_v_u16m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -4399,21 +2470,6 @@ return vlseg4e16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg5e16_v_u16m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , 
, , } @llvm.riscv.vlseg5.mask.nxv4i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg5e16_v_u16m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -4433,23 +2489,6 @@ return vlseg5e16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg6e16_v_u16m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv4i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg6e16_v_u16m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -4471,25 +2510,6 @@ return vlseg6e16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg7e16_v_u16m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv4i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// 
CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg7e16_v_u16m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -4513,27 +2533,6 @@ return vlseg7e16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg8e16_v_u16m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv4i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg8e16_v_u16m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -4559,15 +2558,6 @@ return vlseg8e16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg2e16_v_u16m2_m( -// CHECK-RV32-NEXT: 
entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv8i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg2e16_v_u16m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -4581,17 +2571,6 @@ return vlseg2e16(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg3e16_v_u16m2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv8i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg3e16_v_u16m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -4607,19 +2586,6 @@ return vlseg3e16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg4e16_v_u16m2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv8i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg4e16_v_u16m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -4637,15 +2603,6 @@ return vlseg4e16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg2e16_v_u16m4_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv16i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], 
align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg2e16_v_u16m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -4659,15 +2616,6 @@ return vlseg2e16(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg2e32_v_u32mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg2e32_v_u32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -4681,17 +2629,6 @@ return vlseg2e32(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg3e32_v_u32mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg3e32_v_u32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -4707,19 +2644,6 @@ return vlseg3e32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg4e32_v_u32mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg4e32_v_u32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], 
[[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -4737,21 +2661,6 @@ return vlseg4e32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg5e32_v_u32mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg5e32_v_u32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -4771,23 +2680,6 @@ return vlseg5e32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg6e32_v_u32mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg6e32_v_u32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -4809,25 +2701,6 @@ return vlseg6e32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg7e32_v_u32mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i32.i32( 
[[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg7e32_v_u32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -4851,27 +2724,6 @@ return vlseg7e32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg8e32_v_u32mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg8e32_v_u32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], 
[[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -4897,15 +2749,6 @@ return vlseg8e32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg2e32_v_u32m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg2e32_v_u32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -4919,17 +2762,6 @@ return vlseg2e32(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg3e32_v_u32m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg3e32_v_u32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -4945,19 +2777,6 @@ return vlseg3e32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg4e32_v_u32m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg4e32_v_u32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -4975,21 +2794,6 @@ return vlseg4e32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } -// CHECK-RV32-LABEL: 
@test_vlseg5e32_v_u32m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg5e32_v_u32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -5009,23 +2813,6 @@ return vlseg5e32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg6e32_v_u32m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg6e32_v_u32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -5047,25 +2834,6 @@ return vlseg6e32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg7e32_v_u32m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , 
, , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg7e32_v_u32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -5089,27 +2857,6 @@ return vlseg7e32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg8e32_v_u32m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg8e32_v_u32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -5135,15 +2882,6 @@ return vlseg8e32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, 
base, vl); } -// CHECK-RV32-LABEL: @test_vlseg2e32_v_u32m2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg2e32_v_u32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -5157,17 +2895,6 @@ return vlseg2e32(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg3e32_v_u32m2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv4i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg3e32_v_u32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -5183,19 +2910,6 @@ return vlseg3e32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg4e32_v_u32m2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv4i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg4e32_v_u32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -5213,15 +2927,6 @@ return vlseg4e32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg2e32_v_u32m4_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv8i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = 
extractvalue { , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg2e32_v_u32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -5235,15 +2940,6 @@ return vlseg2e32(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg2e64_v_u64m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg2e64_v_u64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -5257,17 +2953,6 @@ return vlseg2e64(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg3e64_v_u64m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg3e64_v_u64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -5283,19 +2968,6 @@ return vlseg3e64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg4e64_v_u64m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg4e64_v_u64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } 
@llvm.riscv.vlseg4.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -5313,21 +2985,6 @@ return vlseg4e64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg5e64_v_u64m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg5e64_v_u64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -5347,23 +3004,6 @@ return vlseg5e64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg6e64_v_u64m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg6e64_v_u64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -5385,25 +3025,6 @@ return vlseg6e64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg7e64_v_u64m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: 
[[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg7e64_v_u64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -5427,27 +3048,6 @@ return vlseg7e64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg8e64_v_u64m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg8e64_v_u64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], 
[[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -5473,15 +3073,6 @@ return vlseg8e64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg2e64_v_u64m2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg2e64_v_u64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -5495,17 +3086,6 @@ return vlseg2e64(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg3e64_v_u64m2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg3e64_v_u64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -5521,19 +3101,6 @@ return vlseg3e64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg4e64_v_u64m2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg4e64_v_u64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -5551,15 +3118,6 @@ return vlseg4e64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, 
maskedoff2, maskedoff3, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg2e64_v_u64m4_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg2e64_v_u64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -5573,15 +3131,6 @@ return vlseg2e64(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg2e32_v_f32mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg2e32_v_f32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -5595,17 +3144,6 @@ return vlseg2e32(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg3e32_v_f32mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg3e32_v_f32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -5621,19 +3159,6 @@ return vlseg3e32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg4e32_v_f32mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// 
CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg4e32_v_f32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -5651,21 +3176,6 @@ return vlseg4e32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg5e32_v_f32mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg5e32_v_f32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -5685,23 +3195,6 @@ return vlseg5e32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg6e32_v_f32mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg6e32_v_f32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], 
[[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -5723,25 +3216,6 @@ return vlseg6e32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg7e32_v_f32mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg7e32_v_f32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -5765,27 +3239,6 @@ return vlseg7e32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg8e32_v_f32mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV32-NEXT: 
[[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg8e32_v_f32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -5811,15 +3264,6 @@ return vlseg8e32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg2e32_v_f32m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg2e32_v_f32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -5833,17 +3277,6 @@ return vlseg2e32(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg3e32_v_f32m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg3e32_v_f32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -5859,19 +3292,6 @@ return vlseg3e32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg4e32_v_f32m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: ret 
void -// // CHECK-RV64-LABEL: @test_vlseg4e32_v_f32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -5889,21 +3309,6 @@ return vlseg4e32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg5e32_v_f32m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv2f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg5e32_v_f32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -5923,23 +3328,6 @@ return vlseg5e32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg6e32_v_f32m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv2f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg6e32_v_f32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -5961,25 +3349,6 @@ return vlseg6e32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, 
maskedoff3, maskedoff4, maskedoff5, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg7e32_v_f32m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv2f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg7e32_v_f32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -6003,27 +3372,6 @@ return vlseg7e32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg8e32_v_f32m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv2f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg8e32_v_f32m1_m( // CHECK-RV64-NEXT: entry: // 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -6049,15 +3397,6 @@ return vlseg8e32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg2e32_v_f32m2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg2e32_v_f32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -6071,17 +3410,6 @@ return vlseg2e32(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg3e32_v_f32m2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv4f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg3e32_v_f32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv4f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -6097,19 +3425,6 @@ return vlseg3e32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg4e32_v_f32m2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv4f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg4e32_v_f32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv4f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], 
[[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -6127,15 +3442,6 @@ return vlseg4e32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg2e32_v_f32m4_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv8f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg2e32_v_f32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv8f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -6149,15 +3455,6 @@ return vlseg2e32(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg2e64_v_f64m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1f64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg2e64_v_f64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -6171,17 +3468,6 @@ return vlseg2e64(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg3e64_v_f64m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1f64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg3e64_v_f64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -6197,19 +3483,6 @@ return vlseg3e64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg4e64_v_f64m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1f64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = 
extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg4e64_v_f64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -6227,21 +3500,6 @@ return vlseg4e64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg5e64_v_f64m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1f64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg5e64_v_f64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -6261,23 +3519,6 @@ return vlseg5e64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg6e64_v_f64m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1f64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg6e64_v_f64m1_m( // CHECK-RV64-NEXT: entry: 
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -6299,25 +3540,6 @@ return vlseg6e64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg7e64_v_f64m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1f64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg7e64_v_f64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -6341,27 +3563,6 @@ return vlseg7e64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg8e64_v_f64m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1f64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// 
CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg8e64_v_f64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -6387,15 +3588,6 @@ return vlseg8e64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg2e64_v_f64m2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2f64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg2e64_v_f64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -6409,17 +3601,6 @@ return vlseg2e64(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg3e64_v_f64m2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2f64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg3e64_v_f64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -6435,19 +3616,6 @@ return vlseg3e64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg4e64_v_f64m2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2f64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 
8 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg4e64_v_f64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -6465,15 +3633,6 @@ return vlseg4e64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg2e64_v_f64m4_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4f64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 0) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg2e64_v_f64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -6486,3 +3645,460 @@ void test_vlseg2e64_v_f64m4_m (vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, size_t vl) { return vlseg2e64(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } + +// CHECK-RV64-LABEL: @test_vlseg2e16_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, size_t vl) { + return vlseg2e16(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg3e16_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, size_t vl) { + return vlseg3e16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg4e16_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, size_t vl) { + return vlseg4e16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg5e16_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, size_t vl) { + return vlseg5e16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg6e16_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: 
store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, size_t vl) { + return vlseg6e16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg7e16_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, size_t vl) { + return vlseg7e16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg8e16_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// 
CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, size_t vl) { + return vlseg8e16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg2e16_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, size_t vl) { + return vlseg2e16(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg3e16_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, size_t vl) { + return vlseg3e16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg4e16_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, size_t vl) { + return vlseg4e16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg5e16_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv2f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, size_t vl) { + return vlseg5e16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg6e16_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv2f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t 
maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, size_t vl) { + return vlseg6e16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg7e16_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv2f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, size_t vl) { + return vlseg7e16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg8e16_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv2f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , 
, , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, size_t vl) { + return vlseg8e16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg2e16_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, size_t vl) { + return vlseg2e16(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg3e16_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv4f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, size_t vl) { + return vlseg3e16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg4e16_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv4f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t 
maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, size_t vl) { + return vlseg4e16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg5e16_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv4f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, size_t vl) { + return vlseg5e16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg6e16_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv4f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, size_t vl) { + return vlseg6e16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg7e16_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv4f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], 
[[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, size_t vl) { + return vlseg7e16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg8e16_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv4f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 
*base, size_t vl) { + return vlseg8e16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg2e16_v_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv8f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e16_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, size_t vl) { + return vlseg2e16(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg3e16_v_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv8f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e16_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, size_t vl) { + return vlseg3e16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg4e16_v_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv8f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e16_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, size_t vl) { + return vlseg4e16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg2e16_v_f16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv16f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * 
[[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e16_v_f16m4_m(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const _Float16 *base, size_t vl) { + return vlseg2e16(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vlsseg.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vlsseg.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vlsseg.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vlsseg.c @@ -1,7 +1,7 @@ // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d \ -// RUN: -target-feature +v \ +// RUN: -target-feature +zfh -target-feature +experimental-zvfh -target-feature +v \ // RUN: -disable-O0-optnone -emit-llvm %s \ // RUN: -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s @@ -3646,3 +3646,459 @@ return vlsseg2e64(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); } +// CHECK-RV64-LABEL: @test_vlsseg2e16_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg2e16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return vlsseg2e16(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg3e16_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg3e16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return vlsseg3e16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg4e16_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store 
[[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg4e16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return vlsseg4e16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg5e16_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg5e16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return vlsseg5e16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg6e16_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret 
void +// +void test_vlsseg6e16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return vlsseg6e16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg7e16_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg7e16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return vlsseg7e16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg8e16_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: 
store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg8e16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return vlsseg8e16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg2e16_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg2e16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return vlsseg2e16(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg3e16_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg3e16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return vlsseg3e16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg4e16_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } 
[[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg4e16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return vlsseg4e16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg5e16_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg5e16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return vlsseg5e16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg6e16_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg6e16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, 
vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return vlsseg6e16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg7e16_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg7e16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return vlsseg7e16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg8e16_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = 
extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg8e16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return vlsseg8e16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg2e16_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg2e16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return vlsseg2e16(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg3e16_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg3e16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return vlsseg3e16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg4e16_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: 
[[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg4e16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return vlsseg4e16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg5e16_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv4f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg5e16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return vlsseg5e16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg6e16_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv4f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg6e16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t 
maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return vlsseg6e16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg7e16_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv4f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg7e16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return vlsseg7e16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg8e16_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv4f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , 
, , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg8e16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return vlsseg8e16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg2e16_v_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg2e16_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return vlsseg2e16(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg3e16_v_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv8f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg3e16_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return vlsseg3e16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg4e16_v_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv8f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// 
CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg4e16_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return vlsseg4e16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg2e16_v_f16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv16f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg2e16_v_f16m4_m(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return vlsseg2e16(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vluxei.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vluxei.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vluxei.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vluxei.c @@ -1,6 +1,7 @@ // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py // REQUIRES: riscv-registered-target -// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +v \ +// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d \ +// RUN: -target-feature +v -target-feature +zfh -target-feature +experimental-zvfh \ // RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s #include @@ -3824,3 +3825,423 @@ vfloat64m8_t test_vluxei64_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, vuint64m8_t bindex, size_t vl) { return vluxei64(mask, maskedoff, base, bindex, vl); } + +// CHECK-RV64-LABEL: @test_vluxei8_v_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1f16.nxv1i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16mf4_t test_vluxei8_v_f16mf4(const _Float16 *base, vuint8mf8_t bindex, size_t vl) { + return vluxei8(base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxei8_v_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2f16.nxv2i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16mf2_t test_vluxei8_v_f16mf2(const _Float16 *base, vuint8mf4_t bindex, size_t vl) { + return vluxei8(base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxei8_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4f16.nxv4i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16m1_t test_vluxei8_v_f16m1(const _Float16 *base, 
*base, vuint8mf2_t bindex, size_t vl) {
+  return vluxei8(base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei8_v_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 8 x half>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x half> @llvm.riscv.vluxei.nxv8f16.nxv8i8.i64(<vscale x 8 x half> undef, <vscale x 8 x half>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP1]]
+//
+vfloat16m2_t test_vluxei8_v_f16m2(const _Float16 *base, vuint8m1_t bindex, size_t vl) {
+  return vluxei8(base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei8_v_f16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 16 x half>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x half> @llvm.riscv.vluxei.nxv16f16.nxv16i8.i64(<vscale x 16 x half> undef, <vscale x 16 x half>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP1]]
+//
+vfloat16m4_t test_vluxei8_v_f16m4(const _Float16 *base, vuint8m2_t bindex, size_t vl) {
+  return vluxei8(base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei8_v_f16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 32 x half>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x half> @llvm.riscv.vluxei.nxv32f16.nxv32i8.i64(<vscale x 32 x half> undef, <vscale x 32 x half>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP1]]
+//
+vfloat16m8_t test_vluxei8_v_f16m8(const _Float16 *base, vuint8m4_t bindex, size_t vl) {
+  return vluxei8(base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei16_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 1 x half>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x half> @llvm.riscv.vluxei.nxv1f16.nxv1i16.i64(<vscale x 1 x half> undef, <vscale x 1 x half>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP1]]
+//
+vfloat16mf4_t test_vluxei16_v_f16mf4(const _Float16 *base, vuint16mf4_t bindex, size_t vl) {
+  return vluxei16(base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei16_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 2 x half>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x half> @llvm.riscv.vluxei.nxv2f16.nxv2i16.i64(<vscale x 2 x half> undef, <vscale x 2 x half>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP1]]
+//
+vfloat16mf2_t test_vluxei16_v_f16mf2(const _Float16 *base, vuint16mf2_t bindex, size_t vl) {
+  return vluxei16(base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei16_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 4 x half>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x half> @llvm.riscv.vluxei.nxv4f16.nxv4i16.i64(<vscale x 4 x half> undef, <vscale x 4 x half>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP1]]
+//
+vfloat16m1_t test_vluxei16_v_f16m1(const _Float16 *base, vuint16m1_t bindex, size_t vl) {
+  return vluxei16(base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei16_v_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 8 x half>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x half> @llvm.riscv.vluxei.nxv8f16.nxv8i16.i64(<vscale x 8 x half> undef, <vscale x 8 x half>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP1]]
+//
+vfloat16m2_t test_vluxei16_v_f16m2(const _Float16 *base, vuint16m2_t bindex, size_t vl) {
+  return vluxei16(base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei16_v_f16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 16 x half>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x half> @llvm.riscv.vluxei.nxv16f16.nxv16i16.i64(<vscale x 16 x half> undef, <vscale x 16 x half>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP1]]
+//
+vfloat16m4_t test_vluxei16_v_f16m4(const _Float16 *base, vuint16m4_t bindex, size_t vl) {
+  return vluxei16(base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei16_v_f16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 32 x half>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x half> @llvm.riscv.vluxei.nxv32f16.nxv32i16.i64(<vscale x 32 x half> undef, <vscale x 32 x half>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP1]]
+//
+vfloat16m8_t test_vluxei16_v_f16m8(const _Float16 *base, vuint16m8_t bindex, size_t vl) {
+  return vluxei16(base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei32_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 1 x half>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x half> @llvm.riscv.vluxei.nxv1f16.nxv1i32.i64(<vscale x 1 x half> undef, <vscale x 1 x half>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP1]]
+//
+vfloat16mf4_t test_vluxei32_v_f16mf4(const _Float16 *base, vuint32mf2_t bindex, size_t vl) {
+  return vluxei32(base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei32_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 2 x half>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x half> @llvm.riscv.vluxei.nxv2f16.nxv2i32.i64(<vscale x 2 x half> undef, <vscale x 2 x half>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP1]]
+//
+vfloat16mf2_t test_vluxei32_v_f16mf2(const _Float16 *base, vuint32m1_t bindex, size_t vl) {
+  return vluxei32(base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei32_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 4 x half>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x half> @llvm.riscv.vluxei.nxv4f16.nxv4i32.i64(<vscale x 4 x half> undef, <vscale x 4 x half>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP1]]
+//
+vfloat16m1_t test_vluxei32_v_f16m1(const _Float16 *base, vuint32m2_t bindex, size_t vl) {
+  return vluxei32(base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei32_v_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 8 x half>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x half> @llvm.riscv.vluxei.nxv8f16.nxv8i32.i64(<vscale x 8 x half> undef, <vscale x 8 x half>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP1]]
+//
+vfloat16m2_t test_vluxei32_v_f16m2(const _Float16 *base, vuint32m4_t bindex, size_t vl) {
+  return vluxei32(base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei32_v_f16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 16 x half>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x half> @llvm.riscv.vluxei.nxv16f16.nxv16i32.i64(<vscale x 16 x half> undef, <vscale x 16 x half>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP1]]
+//
+vfloat16m4_t test_vluxei32_v_f16m4(const _Float16 *base, vuint32m8_t bindex, size_t vl) {
+  return vluxei32(base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei64_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 1 x half>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x half> @llvm.riscv.vluxei.nxv1f16.nxv1i64.i64(<vscale x 1 x half> undef, <vscale x 1 x half>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP1]]
+//
+vfloat16mf4_t test_vluxei64_v_f16mf4(const _Float16 *base, vuint64m1_t bindex, size_t vl) {
+  return vluxei64(base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei64_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 2 x half>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x half> @llvm.riscv.vluxei.nxv2f16.nxv2i64.i64(<vscale x 2 x half> undef, <vscale x 2 x half>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP1]]
+//
+vfloat16mf2_t test_vluxei64_v_f16mf2(const _Float16 *base, vuint64m2_t bindex, size_t vl) {
+  return vluxei64(base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei64_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 4 x half>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x half> @llvm.riscv.vluxei.nxv4f16.nxv4i64.i64(<vscale x 4 x half> undef, <vscale x 4 x half>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP1]]
+//
+vfloat16m1_t test_vluxei64_v_f16m1(const _Float16 *base, vuint64m4_t bindex, size_t vl) {
+  return vluxei64(base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei64_v_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 8 x half>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x half> @llvm.riscv.vluxei.nxv8f16.nxv8i64.i64(<vscale x 8 x half> undef, <vscale x 8 x half>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP1]]
+//
+vfloat16m2_t test_vluxei64_v_f16m2(const _Float16 *base, vuint64m8_t bindex, size_t vl) {
+  return vluxei64(base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei8_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 1 x half>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x half> @llvm.riscv.vluxei.mask.nxv1f16.nxv1i8.i64(<vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP1]]
+//
+vfloat16mf4_t test_vluxei8_v_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
+  return vluxei8(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei8_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 2 x half>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x half> @llvm.riscv.vluxei.mask.nxv2f16.nxv2i8.i64(<vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP1]]
+//
+vfloat16mf2_t test_vluxei8_v_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, const _Float16 *base, vuint8mf4_t bindex, size_t vl) {
+  return vluxei8(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei8_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 4 x half>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x half> @llvm.riscv.vluxei.mask.nxv4f16.nxv4i8.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP1]]
+//
+vfloat16m1_t test_vluxei8_v_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, const _Float16 *base, vuint8mf2_t bindex, size_t vl) {
+  return vluxei8(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei8_v_f16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 8 x half>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x half> @llvm.riscv.vluxei.mask.nxv8f16.nxv8i8.i64(<vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP1]]
+//
+vfloat16m2_t test_vluxei8_v_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, const _Float16 *base, vuint8m1_t bindex, size_t vl) {
+  return vluxei8(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei8_v_f16m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 16 x half>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x half> @llvm.riscv.vluxei.mask.nxv16f16.nxv16i8.i64(<vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP1]]
+//
+vfloat16m4_t 
test_vluxei8_v_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, const _Float16 *base, vuint8m2_t bindex, size_t vl) { + return vluxei8(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxei8_v_f16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv32f16.nxv32i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16m8_t test_vluxei8_v_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, const _Float16 *base, vuint8m4_t bindex, size_t vl) { + return vluxei8(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxei16_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1f16.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16mf4_t test_vluxei16_v_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { + return vluxei16(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxei16_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2f16.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16mf2_t test_vluxei16_v_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { + return vluxei16(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxei16_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4f16.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16m1_t test_vluxei16_v_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, const _Float16 *base, vuint16m1_t bindex, size_t vl) { + return vluxei16(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxei16_v_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8f16.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16m2_t test_vluxei16_v_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, const _Float16 *base, vuint16m2_t bindex, size_t vl) { + return vluxei16(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxei16_v_f16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16f16.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16m4_t test_vluxei16_v_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, const _Float16 *base, vuint16m4_t bindex, size_t vl) { + return vluxei16(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxei16_v_f16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] 
= bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv32f16.nxv32i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16m8_t test_vluxei16_v_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, const _Float16 *base, vuint16m8_t bindex, size_t vl) { + return vluxei16(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxei32_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1f16.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16mf4_t test_vluxei32_v_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { + return vluxei32(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxei32_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2f16.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16mf2_t test_vluxei32_v_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, const _Float16 *base, vuint32m1_t bindex, size_t vl) { + return vluxei32(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxei32_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4f16.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16m1_t test_vluxei32_v_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, const _Float16 *base, vuint32m2_t bindex, size_t vl) { + return vluxei32(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxei32_v_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8f16.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16m2_t test_vluxei32_v_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, const _Float16 *base, vuint32m4_t bindex, size_t vl) { + return vluxei32(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxei32_v_f16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16f16.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16m4_t test_vluxei32_v_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, const _Float16 *base, vuint32m8_t bindex, size_t vl) { + return vluxei32(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxei64_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1f16.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16mf4_t 
test_vluxei64_v_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, const _Float16 *base, vuint64m1_t bindex, size_t vl) {
+  return vluxei64(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei64_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 2 x half>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x half> @llvm.riscv.vluxei.mask.nxv2f16.nxv2i64.i64(<vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP1]]
+//
+vfloat16mf2_t test_vluxei64_v_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, const _Float16 *base, vuint64m2_t bindex, size_t vl) {
+  return vluxei64(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei64_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 4 x half>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x half> @llvm.riscv.vluxei.mask.nxv4f16.nxv4i64.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP1]]
+//
+vfloat16m1_t test_vluxei64_v_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, const _Float16 *base, vuint64m4_t bindex, size_t vl) {
+  return vluxei64(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei64_v_f16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 8 x half>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x half> @llvm.riscv.vluxei.mask.nxv8f16.nxv8i64.i64(<vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP1]]
+//
+vfloat16m2_t test_vluxei64_v_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, const _Float16 *base, vuint64m8_t bindex, size_t vl) {
+  return vluxei64(mask, maskedoff, base, bindex, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vluxseg_mask.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vluxseg_mask.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vluxseg_mask.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vluxseg_mask.c
@@ -1,7 +1,7 @@
 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d \
-// RUN:   -target-feature +v \
+// RUN:   -target-feature +zfh -target-feature +experimental-zvfh -target-feature +v \
 // RUN:   -disable-O0-optnone -emit-llvm %s \
 // RUN:   -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
@@ -7478,3 +7478,1505 @@
 void test_vluxseg2ei64_v_f64m4_m (vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, vuint64m4_t bindex, size_t vl) {
   return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
 }
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vluxseg2.nxv4f16.nxv4i8.i64(half* [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg2ei8_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, const _Float16 *base, vuint8mf2_t bindex, size_t vl) {
+  return vluxseg2ei8(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL:
@test_vluxseg3ei8_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv4f16.nxv4i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei8_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg3ei8(v0, v1, v2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv4f16.nxv4i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei8_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg4ei8(v0, v1, v2, v3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv4f16.nxv4i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei8_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv4f16.nxv4i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store 
[[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei8_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv4f16.nxv4i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei8_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv4f16.nxv4i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], 
align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei8_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv8f16.nxv8i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei8_v_f16m2(vfloat16m2_t *v0, vfloat16m2_t *v1, const _Float16 *base, vuint8m1_t bindex, size_t vl) { + return vluxseg2ei8(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv8f16.nxv8i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei8_v_f16m2(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, const _Float16 *base, vuint8m1_t bindex, size_t vl) { + return vluxseg3ei8(v0, v1, v2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv8f16.nxv8i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei8_v_f16m2(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, const _Float16 *base, vuint8m1_t bindex, size_t vl) { + return vluxseg4ei8(v0, v1, v2, v3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv16f16.nxv16i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei8_v_f16m4(vfloat16m4_t *v0, vfloat16m4_t *v1, const _Float16 *base, vuint8m2_t bindex, size_t vl) { + return vluxseg2ei8(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: 
@test_vluxseg2ei16_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4f16.nxv4i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei16_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, const _Float16 *base, vuint16m1_t bindex, size_t vl) { + return vluxseg2ei16(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv4f16.nxv4i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei16_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, const _Float16 *base, vuint16m1_t bindex, size_t vl) { + return vluxseg3ei16(v0, v1, v2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv4f16.nxv4i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei16_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, const _Float16 *base, vuint16m1_t bindex, size_t vl) { + return vluxseg4ei16(v0, v1, v2, v3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv4f16.nxv4i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei16_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, const _Float16 *base, 
vuint16m1_t bindex, size_t vl) { + return vluxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv4f16.nxv4i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei16_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, const _Float16 *base, vuint16m1_t bindex, size_t vl) { + return vluxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv4f16.nxv4i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei16_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, const _Float16 *base, vuint16m1_t bindex, size_t vl) { + return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv4f16.nxv4i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * 
[[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei16_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, const _Float16 *base, vuint16m1_t bindex, size_t vl) { + return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv8f16.nxv8i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei16_v_f16m2(vfloat16m2_t *v0, vfloat16m2_t *v1, const _Float16 *base, vuint16m2_t bindex, size_t vl) { + return vluxseg2ei16(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv8f16.nxv8i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei16_v_f16m2(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, const _Float16 *base, vuint16m2_t bindex, size_t vl) { + return vluxseg3ei16(v0, v1, v2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv8f16.nxv8i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei16_v_f16m2(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, const _Float16 *base, vuint16m2_t bindex, size_t vl) { + return vluxseg4ei16(v0, v1, v2, v3, base, 
bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv16f16.nxv16i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei16_v_f16m4(vfloat16m4_t *v0, vfloat16m4_t *v1, const _Float16 *base, vuint16m4_t bindex, size_t vl) { + return vluxseg2ei16(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4f16.nxv4i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei32_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, const _Float16 *base, vuint32m2_t bindex, size_t vl) { + return vluxseg2ei32(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv4f16.nxv4i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei32_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, const _Float16 *base, vuint32m2_t bindex, size_t vl) { + return vluxseg3ei32(v0, v1, v2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv4f16.nxv4i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei32_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, const _Float16 *base, vuint32m2_t bindex, size_t vl) { + return vluxseg4ei32(v0, v1, v2, v3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv4f16.nxv4i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * 
[[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei32_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, const _Float16 *base, vuint32m2_t bindex, size_t vl) { + return vluxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv4f16.nxv4i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei32_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, const _Float16 *base, vuint32m2_t bindex, size_t vl) { + return vluxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv4f16.nxv4i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei32_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, const _Float16 *base, 
vuint32m2_t bindex, size_t vl) { + return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv4f16.nxv4i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei32_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, const _Float16 *base, vuint32m2_t bindex, size_t vl) { + return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv8f16.nxv8i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei32_v_f16m2(vfloat16m2_t *v0, vfloat16m2_t *v1, const _Float16 *base, vuint32m4_t bindex, size_t vl) { + return vluxseg2ei32(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv8f16.nxv8i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei32_v_f16m2(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, const _Float16 *base, vuint32m4_t bindex, size_t vl) { + return vluxseg3ei32(v0, v1, v2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv8f16.nxv8i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: 
[[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei32_v_f16m2(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, const _Float16 *base, vuint32m4_t bindex, size_t vl) { + return vluxseg4ei32(v0, v1, v2, v3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv16f16.nxv16i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei32_v_f16m4(vfloat16m4_t *v0, vfloat16m4_t *v1, const _Float16 *base, vuint32m8_t bindex, size_t vl) { + return vluxseg2ei32(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4f16.nxv4i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei64_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, const _Float16 *base, vuint64m4_t bindex, size_t vl) { + return vluxseg2ei64(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv4f16.nxv4i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei64_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, const _Float16 *base, vuint64m4_t bindex, size_t vl) { + return vluxseg3ei64(v0, v1, v2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv4f16.nxv4i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * 
[[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei64_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, const _Float16 *base, vuint64m4_t bindex, size_t vl) { + return vluxseg4ei64(v0, v1, v2, v3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv4f16.nxv4i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei64_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, const _Float16 *base, vuint64m4_t bindex, size_t vl) { + return vluxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv4f16.nxv4i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei64_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, const _Float16 *base, vuint64m4_t bindex, size_t vl) { + return vluxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv4f16.nxv4i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: 
[[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei64_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, const _Float16 *base, vuint64m4_t bindex, size_t vl) { + return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv4f16.nxv4i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei64_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, const _Float16 *base, vuint64m4_t bindex, size_t vl) { + return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv8f16.nxv8i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei64_v_f16m2(vfloat16m2_t *v0, vfloat16m2_t *v1, const _Float16 *base, vuint64m8_t bindex, size_t vl) { + return vluxseg2ei64(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv8f16.nxv8i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// 
CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei64_v_f16m2(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, const _Float16 *base, vuint64m8_t bindex, size_t vl) { + return vluxseg3ei64(v0, v1, v2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv8f16.nxv8i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei64_v_f16m2(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, const _Float16 *base, vuint64m8_t bindex, size_t vl) { + return vluxseg4ei64(v0, v1, v2, v3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4f16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei8_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4f16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei8_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } 
@llvm.riscv.vluxseg4.mask.nxv4f16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei8_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4f16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei8_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4f16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// 
CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei8_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4f16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei8_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4f16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store 
[[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei8_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8f16.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei8_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, vuint8m1_t bindex, size_t vl) { + return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8f16.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei8_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, vuint8m1_t bindex, size_t vl) { + return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8f16.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* 
[[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei8_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, vuint8m1_t bindex, size_t vl) { + return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv16f16.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei8_v_f16m4_m(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const _Float16 *base, vuint8m2_t bindex, size_t vl) { + return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4f16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, vuint16m1_t bindex, size_t vl) { + return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4f16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t 
maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, vuint16m1_t bindex, size_t vl) { + return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4f16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, vuint16m1_t bindex, size_t vl) { + return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4f16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, vuint16m1_t bindex, size_t vl) { + return vluxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4f16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { 
, , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, vuint16m1_t bindex, size_t vl) { + return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4f16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, vuint16m1_t bindex, size_t vl) { + return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4f16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// 
CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, vuint16m1_t bindex, size_t vl) { + return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8f16.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei16_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, vuint16m2_t bindex, size_t vl) { + return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8f16.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei16_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, vuint16m2_t 
bindex, size_t vl) { + return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8f16.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei16_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, vuint16m2_t bindex, size_t vl) { + return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv16f16.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei16_v_f16m4_m(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const _Float16 *base, vuint16m4_t bindex, size_t vl) { + return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4f16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei32_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, vuint32m2_t bindex, size_t vl) { + return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4f16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: 
[[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei32_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, vuint32m2_t bindex, size_t vl) { + return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4f16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei32_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, vuint32m2_t bindex, size_t vl) { + return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4f16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei32_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, vuint32m2_t bindex, size_t vl) { + return vluxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } 
@llvm.riscv.vluxseg6.mask.nxv4f16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei32_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, vuint32m2_t bindex, size_t vl) { + return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4f16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei32_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, vuint32m2_t bindex, size_t vl) { + return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// 
CHECK-RV64-LABEL: @test_vluxseg8ei32_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4f16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei32_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, vuint32m2_t bindex, size_t vl) { + return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8f16.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei32_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, vuint32m4_t bindex, size_t vl) { + return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8f16.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// 
CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei32_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, vuint32m4_t bindex, size_t vl) { + return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8f16.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei32_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, vuint32m4_t bindex, size_t vl) { + return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv16f16.nxv16i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei32_v_f16m4_m(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const _Float16 *base, vuint32m8_t bindex, size_t vl) { + return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4f16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei64_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, vuint64m4_t bindex, size_t vl) { + return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: 
@test_vluxseg3ei64_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4f16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei64_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, vuint64m4_t bindex, size_t vl) { + return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4f16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei64_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, vuint64m4_t bindex, size_t vl) { + return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4f16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei64_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, 
vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, vuint64m4_t bindex, size_t vl) { + return vluxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4f16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei64_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, vuint64m4_t bindex, size_t vl) { + return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4f16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei64_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, 
vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, vuint64m4_t bindex, size_t vl) { + return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4f16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei64_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, vuint64m4_t bindex, size_t vl) { + return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8f16.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei64_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, vuint64m8_t bindex, size_t vl) { + return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , 
} @llvm.riscv.vluxseg3.mask.nxv8f16.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei64_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, vuint64m8_t bindex, size_t vl) { + return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8f16.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei64_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, vuint64m8_t bindex, size_t vl) { + return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsoxei.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsoxei.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsoxei.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsoxei.c @@ -1,6 +1,7 @@ // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py // REQUIRES: riscv-registered-target -// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +v \ +// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d \ +// RUN: -target-feature +v -target-feature +zfh -target-feature +experimental-zvfh \ // RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s #include @@ -4224,3 +4225,423 @@ vfloat64m8_t value, size_t vl) { return vsoxei64(mask, base, bindex, value, vl); } + +// CHECK-RV64-LABEL: @test_vsoxei8_v_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1f16.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxei8_v_f16mf4(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t value, size_t vl) { + return vsoxei8(base, bindex, value, vl); +} + +// 
CHECK-RV64-LABEL: @test_vsoxei8_v_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2f16.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxei8_v_f16mf2(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t value, size_t vl) { + return vsoxei8(base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxei8_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4f16.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxei8_v_f16m1(_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t value, size_t vl) { + return vsoxei8(base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxei8_v_f16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8f16.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxei8_v_f16m2(_Float16 *base, vuint8m1_t bindex, vfloat16m2_t value, size_t vl) { + return vsoxei8(base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxei8_v_f16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv16f16.nxv16i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxei8_v_f16m4(_Float16 *base, vuint8m2_t bindex, vfloat16m4_t value, size_t vl) { + return vsoxei8(base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxei8_v_f16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv32f16.nxv32i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxei8_v_f16m8(_Float16 *base, vuint8m4_t bindex, vfloat16m8_t value, size_t vl) { + return vsoxei8(base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxei16_v_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1f16.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxei16_v_f16mf4(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t value, size_t vl) { + return vsoxei16(base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxei16_v_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2f16.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxei16_v_f16mf2(_Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t value, size_t vl) { + return vsoxei16(base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxei16_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4f16.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxei16_v_f16m1(_Float16 *base, vuint16m1_t bindex, vfloat16m1_t value, size_t vl) { 
+ return vsoxei16(base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxei16_v_f16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8f16.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxei16_v_f16m2(_Float16 *base, vuint16m2_t bindex, vfloat16m2_t value, size_t vl) { + return vsoxei16(base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxei16_v_f16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv16f16.nxv16i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxei16_v_f16m4(_Float16 *base, vuint16m4_t bindex, vfloat16m4_t value, size_t vl) { + return vsoxei16(base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxei16_v_f16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv32f16.nxv32i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxei16_v_f16m8(_Float16 *base, vuint16m8_t bindex, vfloat16m8_t value, size_t vl) { + return vsoxei16(base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxei32_v_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1f16.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxei32_v_f16mf4(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t value, size_t vl) { + return vsoxei32(base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxei32_v_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2f16.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxei32_v_f16mf2(_Float16 *base, vuint32m1_t bindex, vfloat16mf2_t value, size_t vl) { + return vsoxei32(base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxei32_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4f16.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxei32_v_f16m1(_Float16 *base, vuint32m2_t bindex, vfloat16m1_t value, size_t vl) { + return vsoxei32(base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxei32_v_f16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8f16.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxei32_v_f16m2(_Float16 *base, vuint32m4_t bindex, vfloat16m2_t value, size_t vl) { + return vsoxei32(base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxei32_v_f16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv16f16.nxv16i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void 
test_vsoxei32_v_f16m4(_Float16 *base, vuint32m8_t bindex, vfloat16m4_t value, size_t vl) { + return vsoxei32(base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxei64_v_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1f16.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxei64_v_f16mf4(_Float16 *base, vuint64m1_t bindex, vfloat16mf4_t value, size_t vl) { + return vsoxei64(base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxei64_v_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2f16.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxei64_v_f16mf2(_Float16 *base, vuint64m2_t bindex, vfloat16mf2_t value, size_t vl) { + return vsoxei64(base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxei64_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4f16.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxei64_v_f16m1(_Float16 *base, vuint64m4_t bindex, vfloat16m1_t value, size_t vl) { + return vsoxei64(base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxei64_v_f16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8f16.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxei64_v_f16m2(_Float16 *base, vuint64m8_t bindex, vfloat16m2_t value, size_t vl) { + return vsoxei64(base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxei8_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1f16.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxei8_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t value, size_t vl) { + return vsoxei8(mask, base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxei8_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2f16.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxei8_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t value, size_t vl) { + return vsoxei8(mask, base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxei8_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4f16.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxei8_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1_t value, size_t vl) { + return vsoxei8(mask, base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxei8_v_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8f16.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxei8_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint8m1_t bindex, vfloat16m2_t value, size_t vl) { + return vsoxei8(mask, base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxei8_v_f16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv16f16.nxv16i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxei8_v_f16m4_m(vbool4_t mask, _Float16 *base, vuint8m2_t bindex, vfloat16m4_t value, size_t vl) { + return vsoxei8(mask, base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxei8_v_f16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv32f16.nxv32i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxei8_v_f16m8_m(vbool2_t mask, _Float16 *base, vuint8m4_t bindex, vfloat16m8_t value, size_t vl) { + return vsoxei8(mask, base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxei16_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1f16.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxei16_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t value, size_t vl) { + return vsoxei16(mask, base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxei16_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2f16.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxei16_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t value, size_t vl) { + return vsoxei16(mask, base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxei16_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4f16.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxei16_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1_t value, size_t vl) { + return vsoxei16(mask, base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxei16_v_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8f16.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxei16_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint16m2_t bindex, vfloat16m2_t value, size_t vl) { + return vsoxei16(mask, base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxei16_v_f16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxei.mask.nxv16f16.nxv16i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxei16_v_f16m4_m(vbool4_t mask, _Float16 *base, vuint16m4_t bindex, vfloat16m4_t value, size_t vl) { + return vsoxei16(mask, base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxei16_v_f16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv32f16.nxv32i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxei16_v_f16m8_m(vbool2_t mask, _Float16 *base, vuint16m8_t bindex, vfloat16m8_t value, size_t vl) { + return vsoxei16(mask, base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxei32_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1f16.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxei32_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t value, size_t vl) { + return vsoxei32(mask, base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxei32_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2f16.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxei32_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2_t value, size_t vl) { + return vsoxei32(mask, base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxei32_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4f16.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxei32_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1_t value, size_t vl) { + return vsoxei32(mask, base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxei32_v_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8f16.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxei32_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint32m4_t bindex, vfloat16m2_t value, size_t vl) { + return vsoxei32(mask, base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxei32_v_f16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv16f16.nxv16i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxei32_v_f16m4_m(vbool4_t mask, _Float16 *base, vuint32m8_t bindex, vfloat16m4_t value, size_t vl) { + return vsoxei32(mask, base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxei64_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1f16.nxv1i64.i64( [[VALUE:%.*]], * 
[[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxei64_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4_t value, size_t vl) { + return vsoxei64(mask, base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxei64_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2f16.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxei64_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2_t value, size_t vl) { + return vsoxei64(mask, base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxei64_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4f16.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxei64_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1_t value, size_t vl) { + return vsoxei64(mask, base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxei64_v_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8f16.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxei64_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint64m8_t bindex, vfloat16m2_t value, size_t vl) { + return vsoxei64(mask, base, bindex, value, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsoxseg_mask.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsoxseg_mask.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsoxseg_mask.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsoxseg_mask.c @@ -1,7 +1,7 @@ // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d \ -// RUN: -target-feature +v \ +// RUN: -target-feature +zfh -target-feature +experimental-zvfh -target-feature +v \ // RUN: -disable-O0-optnone -emit-llvm %s \ // RUN: -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s @@ -7134,3 +7134,894 @@ void test_vsoxseg2ei64_v_f64m4_m (vbool16_t mask, double *base, vuint64m4_t bindex, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) { return vsoxseg2ei64(mask, base, bindex, v0, v1, vl); } + +// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1f16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg2ei8_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) { + return vsoxseg2ei8(mask, base, bindex, v0, v1, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1f16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg3ei8_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, 
vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) { + return vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1f16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg4ei8_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) { + return vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1f16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg5ei8_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) { + return vsoxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1f16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg6ei8_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) { + return vsoxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1f16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg7ei8_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) { + return vsoxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1f16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg8ei8_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) { + return vsoxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2f16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg2ei8_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, 
vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) { + return vsoxseg2ei8(mask, base, bindex, v0, v1, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2f16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg3ei8_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) { + return vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2f16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg4ei8_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) { + return vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2f16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg5ei8_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) { + return vsoxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2f16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg6ei8_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) { + return vsoxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2f16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg7ei8_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) { + return vsoxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2f16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg8ei8_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t 
v6, vfloat16mf2_t v7, size_t vl) { + return vsoxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4f16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg2ei8_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) { + return vsoxseg2ei8(mask, base, bindex, v0, v1, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4f16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg3ei8_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) { + return vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4f16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg4ei8_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) { + return vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4f16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg5ei8_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) { + return vsoxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4f16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg6ei8_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) { + return vsoxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4f16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg7ei8_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) { + return vsoxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg8.mask.nxv4f16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg8ei8_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) { + return vsoxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8f16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg2ei8_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint8m1_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) { + return vsoxseg2ei8(mask, base, bindex, v0, v1, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8f16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg3ei8_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint8m1_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) { + return vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8f16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg4ei8_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint8m1_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) { + return vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv16f16.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg2ei8_v_f16m4_m(vbool4_t mask, _Float16 *base, vuint8m2_t bindex, vfloat16m4_t v0, vfloat16m4_t v1, size_t vl) { + return vsoxseg2ei8(mask, base, bindex, v0, v1, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1f16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg2ei16_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) { + return vsoxseg2ei16(mask, base, bindex, v0, v1, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1f16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg3ei16_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) { + return vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl); +} + +// CHECK-RV64-LABEL: 
@test_vsoxseg4ei16_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1f16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg4ei16_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) { + return vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1f16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg5ei16_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) { + return vsoxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1f16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg6ei16_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) { + return vsoxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1f16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg7ei16_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) { + return vsoxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1f16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg8ei16_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) { + return vsoxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2f16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg2ei16_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) { + return vsoxseg2ei16(mask, base, bindex, v0, v1, vl); +} + +// 
CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2f16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg3ei16_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) { + return vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2f16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg4ei16_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) { + return vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2f16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg5ei16_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) { + return vsoxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2f16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg6ei16_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) { + return vsoxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2f16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg7ei16_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) { + return vsoxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2f16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg8ei16_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) { + return vsoxseg8ei16(mask, base, bindex, v0, 
v1, v2, v3, v4, v5, v6, v7, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4f16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg2ei16_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) { + return vsoxseg2ei16(mask, base, bindex, v0, v1, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4f16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg3ei16_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) { + return vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4f16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg4ei16_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) { + return vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4f16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg5ei16_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) { + return vsoxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4f16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg6ei16_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) { + return vsoxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4f16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg7ei16_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) { + return vsoxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4f16.nxv4i16.i64( [[V0:%.*]], 
[[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg8ei16_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) { + return vsoxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8f16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg2ei16_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint16m2_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) { + return vsoxseg2ei16(mask, base, bindex, v0, v1, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8f16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg3ei16_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint16m2_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) { + return vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8f16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg4ei16_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint16m2_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) { + return vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv16f16.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg2ei16_v_f16m4_m(vbool4_t mask, _Float16 *base, vuint16m4_t bindex, vfloat16m4_t v0, vfloat16m4_t v1, size_t vl) { + return vsoxseg2ei16(mask, base, bindex, v0, v1, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1f16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg2ei32_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) { + return vsoxseg2ei32(mask, base, bindex, v0, v1, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1f16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg3ei32_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) { + return vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_f16mf4_m( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1f16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg4ei32_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) { + return vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1f16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg5ei32_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) { + return vsoxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1f16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg6ei32_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) { + return vsoxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1f16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg7ei32_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) { + return vsoxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1f16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg8ei32_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) { + return vsoxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2f16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg2ei32_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) { + return vsoxseg2ei32(mask, base, bindex, v0, v1, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_f16mf2_m( 
+// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2f16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg3ei32_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) { + return vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2f16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg4ei32_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) { + return vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2f16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg5ei32_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) { + return vsoxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2f16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg6ei32_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) { + return vsoxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2f16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg7ei32_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) { + return vsoxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2f16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg8ei32_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) { + return vsoxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +} + +// 
CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4f16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg2ei32_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) { + return vsoxseg2ei32(mask, base, bindex, v0, v1, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4f16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg3ei32_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) { + return vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4f16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg4ei32_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) { + return vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4f16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg5ei32_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) { + return vsoxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4f16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg6ei32_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) { + return vsoxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4f16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg7ei32_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) { + return vsoxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4f16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], 
[[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg8ei32_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) { + return vsoxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8f16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg2ei32_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint32m4_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) { + return vsoxseg2ei32(mask, base, bindex, v0, v1, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8f16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg3ei32_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint32m4_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) { + return vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8f16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg4ei32_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint32m4_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) { + return vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv16f16.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg2ei32_v_f16m4_m(vbool4_t mask, _Float16 *base, vuint32m8_t bindex, vfloat16m4_t v0, vfloat16m4_t v1, size_t vl) { + return vsoxseg2ei32(mask, base, bindex, v0, v1, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1f16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg2ei64_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) { + return vsoxseg2ei64(mask, base, bindex, v0, v1, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1f16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg3ei64_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) { + return vsoxseg3ei64(mask, base, bindex, v0, v1, v2, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1f16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg4ei64_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) { + return vsoxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1f16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg5ei64_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) { + return vsoxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1f16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg6ei64_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) { + return vsoxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1f16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg7ei64_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) { + return vsoxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1f16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg8ei64_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) { + return vsoxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2f16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg2ei64_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) { + return vsoxseg2ei64(mask, base, bindex, v0, v1, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2f16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg3ei64_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) { + return vsoxseg3ei64(mask, base, bindex, v0, v1, v2, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2f16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg4ei64_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) { + return vsoxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2f16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg5ei64_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) { + return vsoxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2f16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg6ei64_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) { + return vsoxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2f16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg7ei64_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) { + return vsoxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2f16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg8ei64_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) { + return vsoxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_f16m1_m( 
+// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4f16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg2ei64_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) { + return vsoxseg2ei64(mask, base, bindex, v0, v1, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4f16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg3ei64_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) { + return vsoxseg3ei64(mask, base, bindex, v0, v1, v2, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4f16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg4ei64_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) { + return vsoxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4f16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg5ei64_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) { + return vsoxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4f16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg6ei64_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) { + return vsoxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4f16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg7ei64_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) { + return vsoxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4f16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* 
[[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg8ei64_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) { + return vsoxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8f16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg2ei64_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint64m8_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) { + return vsoxseg2ei64(mask, base, bindex, v0, v1, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8f16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg3ei64_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint64m8_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) { + return vsoxseg3ei64(mask, base, bindex, v0, v1, v2, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8f16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxseg4ei64_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint64m8_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) { + return vsoxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsseg.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsseg.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsseg.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsseg.c @@ -1,7 +1,7 @@ // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d \ -// RUN: -target-feature +v \ +// RUN: -target-feature +zfh -target-feature +experimental-zvfh -target-feature +v \ // RUN: -disable-O0-optnone -emit-llvm %s \ // RUN: -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s @@ -3625,3 +3625,452 @@ return vsseg2e64(mask, base, v0, v1, vl); } +// CHECK-RV64-LABEL: @test_vsseg2e16_v_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg2.nxv1f16.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsseg2e16_v_f16mf4(_Float16 *base, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) { + return vsseg2e16(base, v0, v1, vl); +} + +// CHECK-RV64-LABEL: @test_vsseg3e16_v_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg3.nxv1f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsseg3e16_v_f16mf4(_Float16 *base, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) { + return vsseg3e16(base, v0, v1, v2, vl); +} + +// CHECK-RV64-LABEL: @test_vsseg4e16_v_f16mf4( +// CHECK-RV64-NEXT: entry: 
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg4.nxv1f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsseg4e16_v_f16mf4(_Float16 *base, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) { + return vsseg4e16(base, v0, v1, v2, v3, vl); +} + +// CHECK-RV64-LABEL: @test_vsseg5e16_v_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg5.nxv1f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsseg5e16_v_f16mf4(_Float16 *base, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) { + return vsseg5e16(base, v0, v1, v2, v3, v4, vl); +} + +// CHECK-RV64-LABEL: @test_vsseg6e16_v_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg6.nxv1f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsseg6e16_v_f16mf4(_Float16 *base, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) { + return vsseg6e16(base, v0, v1, v2, v3, v4, v5, vl); +} + +// CHECK-RV64-LABEL: @test_vsseg7e16_v_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg7.nxv1f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsseg7e16_v_f16mf4(_Float16 *base, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) { + return vsseg7e16(base, v0, v1, v2, v3, v4, v5, v6, vl); +} + +// CHECK-RV64-LABEL: @test_vsseg8e16_v_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg8.nxv1f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsseg8e16_v_f16mf4(_Float16 *base, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) { + return vsseg8e16(base, v0, v1, v2, v3, v4, v5, v6, v7, vl); +} + +// CHECK-RV64-LABEL: @test_vsseg2e16_v_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg2.nxv2f16.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsseg2e16_v_f16mf2(_Float16 *base, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) { + return vsseg2e16(base, v0, v1, vl); +} + +// CHECK-RV64-LABEL: @test_vsseg3e16_v_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg3.nxv2f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsseg3e16_v_f16mf2(_Float16 *base, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) { + return vsseg3e16(base, v0, v1, v2, vl); +} + +// CHECK-RV64-LABEL: @test_vsseg4e16_v_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg4.nxv2f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsseg4e16_v_f16mf2(_Float16 *base, vfloat16mf2_t 
v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) { + return vsseg4e16(base, v0, v1, v2, v3, vl); +} + +// CHECK-RV64-LABEL: @test_vsseg5e16_v_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg5.nxv2f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsseg5e16_v_f16mf2(_Float16 *base, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) { + return vsseg5e16(base, v0, v1, v2, v3, v4, vl); +} + +// CHECK-RV64-LABEL: @test_vsseg6e16_v_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg6.nxv2f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsseg6e16_v_f16mf2(_Float16 *base, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) { + return vsseg6e16(base, v0, v1, v2, v3, v4, v5, vl); +} + +// CHECK-RV64-LABEL: @test_vsseg7e16_v_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg7.nxv2f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsseg7e16_v_f16mf2(_Float16 *base, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) { + return vsseg7e16(base, v0, v1, v2, v3, v4, v5, v6, vl); +} + +// CHECK-RV64-LABEL: @test_vsseg8e16_v_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg8.nxv2f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsseg8e16_v_f16mf2(_Float16 *base, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) { + return vsseg8e16(base, v0, v1, v2, v3, v4, v5, v6, v7, vl); +} + +// CHECK-RV64-LABEL: @test_vsseg2e16_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg2.nxv4f16.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsseg2e16_v_f16m1(_Float16 *base, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) { + return vsseg2e16(base, v0, v1, vl); +} + +// CHECK-RV64-LABEL: @test_vsseg3e16_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg3.nxv4f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsseg3e16_v_f16m1(_Float16 *base, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) { + return vsseg3e16(base, v0, v1, v2, vl); +} + +// CHECK-RV64-LABEL: @test_vsseg4e16_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg4.nxv4f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsseg4e16_v_f16m1(_Float16 *base, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) { + return vsseg4e16(base, v0, v1, v2, v3, vl); +} + +// CHECK-RV64-LABEL: @test_vsseg5e16_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsseg5.nxv4f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsseg5e16_v_f16m1(_Float16 *base, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) { + return vsseg5e16(base, v0, v1, v2, v3, v4, vl); +} + +// CHECK-RV64-LABEL: @test_vsseg6e16_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg6.nxv4f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsseg6e16_v_f16m1(_Float16 *base, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) { + return vsseg6e16(base, v0, v1, v2, v3, v4, v5, vl); +} + +// CHECK-RV64-LABEL: @test_vsseg7e16_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg7.nxv4f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsseg7e16_v_f16m1(_Float16 *base, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) { + return vsseg7e16(base, v0, v1, v2, v3, v4, v5, v6, vl); +} + +// CHECK-RV64-LABEL: @test_vsseg8e16_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg8.nxv4f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsseg8e16_v_f16m1(_Float16 *base, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) { + return vsseg8e16(base, v0, v1, v2, v3, v4, v5, v6, v7, vl); +} + +// CHECK-RV64-LABEL: @test_vsseg2e16_v_f16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg2.nxv8f16.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsseg2e16_v_f16m2(_Float16 *base, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) { + return vsseg2e16(base, v0, v1, vl); +} + +// CHECK-RV64-LABEL: @test_vsseg3e16_v_f16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg3.nxv8f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsseg3e16_v_f16m2(_Float16 *base, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) { + return vsseg3e16(base, v0, v1, v2, vl); +} + +// CHECK-RV64-LABEL: @test_vsseg4e16_v_f16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg4.nxv8f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsseg4e16_v_f16m2(_Float16 *base, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) { + return vsseg4e16(base, v0, v1, v2, v3, vl); +} + +// CHECK-RV64-LABEL: @test_vsseg2e16_v_f16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg2.nxv16f16.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsseg2e16_v_f16m4(_Float16 *base, vfloat16m4_t v0, vfloat16m4_t v1, size_t vl) { + return vsseg2e16(base, v0, v1, vl); +} + +// CHECK-RV64-LABEL: 
@test_vsseg2e16_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg2.mask.nxv1f16.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsseg2e16_v_f16mf4_m(vbool64_t mask, _Float16 *base, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) { + return vsseg2e16(mask, base, v0, v1, vl); +} + +// CHECK-RV64-LABEL: @test_vsseg3e16_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg3.mask.nxv1f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsseg3e16_v_f16mf4_m(vbool64_t mask, _Float16 *base, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) { + return vsseg3e16(mask, base, v0, v1, v2, vl); +} + +// CHECK-RV64-LABEL: @test_vsseg4e16_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg4.mask.nxv1f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsseg4e16_v_f16mf4_m(vbool64_t mask, _Float16 *base, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) { + return vsseg4e16(mask, base, v0, v1, v2, v3, vl); +} + +// CHECK-RV64-LABEL: @test_vsseg5e16_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg5.mask.nxv1f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsseg5e16_v_f16mf4_m(vbool64_t mask, _Float16 *base, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) { + return vsseg5e16(mask, base, v0, v1, v2, v3, v4, vl); +} + +// CHECK-RV64-LABEL: @test_vsseg6e16_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg6.mask.nxv1f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsseg6e16_v_f16mf4_m(vbool64_t mask, _Float16 *base, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) { + return vsseg6e16(mask, base, v0, v1, v2, v3, v4, v5, vl); +} + +// CHECK-RV64-LABEL: @test_vsseg7e16_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg7.mask.nxv1f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsseg7e16_v_f16mf4_m(vbool64_t mask, _Float16 *base, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) { + return vsseg7e16(mask, base, v0, v1, v2, v3, v4, v5, v6, vl); +} + +// CHECK-RV64-LABEL: @test_vsseg8e16_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg8.mask.nxv1f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsseg8e16_v_f16mf4_m(vbool64_t mask, _Float16 *base, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) { + return 
vsseg8e16(mask, base, v0, v1, v2, v3, v4, v5, v6, v7, vl); +} + +// CHECK-RV64-LABEL: @test_vsseg2e16_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg2.mask.nxv2f16.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsseg2e16_v_f16mf2_m(vbool32_t mask, _Float16 *base, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) { + return vsseg2e16(mask, base, v0, v1, vl); +} + +// CHECK-RV64-LABEL: @test_vsseg3e16_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg3.mask.nxv2f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsseg3e16_v_f16mf2_m(vbool32_t mask, _Float16 *base, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) { + return vsseg3e16(mask, base, v0, v1, v2, vl); +} + +// CHECK-RV64-LABEL: @test_vsseg4e16_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg4.mask.nxv2f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsseg4e16_v_f16mf2_m(vbool32_t mask, _Float16 *base, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) { + return vsseg4e16(mask, base, v0, v1, v2, v3, vl); +} + +// CHECK-RV64-LABEL: @test_vsseg5e16_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg5.mask.nxv2f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsseg5e16_v_f16mf2_m(vbool32_t mask, _Float16 *base, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) { + return vsseg5e16(mask, base, v0, v1, v2, v3, v4, vl); +} + +// CHECK-RV64-LABEL: @test_vsseg6e16_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg6.mask.nxv2f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsseg6e16_v_f16mf2_m(vbool32_t mask, _Float16 *base, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) { + return vsseg6e16(mask, base, v0, v1, v2, v3, v4, v5, vl); +} + +// CHECK-RV64-LABEL: @test_vsseg7e16_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg7.mask.nxv2f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsseg7e16_v_f16mf2_m(vbool32_t mask, _Float16 *base, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) { + return vsseg7e16(mask, base, v0, v1, v2, v3, v4, v5, v6, vl); +} + +// CHECK-RV64-LABEL: @test_vsseg8e16_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg8.mask.nxv2f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsseg8e16_v_f16mf2_m(vbool32_t mask, _Float16 *base, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, 
vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) { + return vsseg8e16(mask, base, v0, v1, v2, v3, v4, v5, v6, v7, vl); +} + +// CHECK-RV64-LABEL: @test_vsseg2e16_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg2.mask.nxv4f16.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsseg2e16_v_f16m1_m(vbool16_t mask, _Float16 *base, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) { + return vsseg2e16(mask, base, v0, v1, vl); +} + +// CHECK-RV64-LABEL: @test_vsseg3e16_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg3.mask.nxv4f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsseg3e16_v_f16m1_m(vbool16_t mask, _Float16 *base, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) { + return vsseg3e16(mask, base, v0, v1, v2, vl); +} + +// CHECK-RV64-LABEL: @test_vsseg4e16_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg4.mask.nxv4f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsseg4e16_v_f16m1_m(vbool16_t mask, _Float16 *base, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) { + return vsseg4e16(mask, base, v0, v1, v2, v3, vl); +} + +// CHECK-RV64-LABEL: @test_vsseg5e16_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg5.mask.nxv4f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsseg5e16_v_f16m1_m(vbool16_t mask, _Float16 *base, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) { + return vsseg5e16(mask, base, v0, v1, v2, v3, v4, vl); +} + +// CHECK-RV64-LABEL: @test_vsseg6e16_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg6.mask.nxv4f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsseg6e16_v_f16m1_m(vbool16_t mask, _Float16 *base, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) { + return vsseg6e16(mask, base, v0, v1, v2, v3, v4, v5, vl); +} + +// CHECK-RV64-LABEL: @test_vsseg7e16_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg7.mask.nxv4f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsseg7e16_v_f16m1_m(vbool16_t mask, _Float16 *base, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) { + return vsseg7e16(mask, base, v0, v1, v2, v3, v4, v5, v6, vl); +} + +// CHECK-RV64-LABEL: @test_vsseg8e16_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg8.mask.nxv4f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsseg8e16_v_f16m1_m(vbool16_t mask, _Float16 *base, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, 
vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) { + return vsseg8e16(mask, base, v0, v1, v2, v3, v4, v5, v6, v7, vl); +} + +// CHECK-RV64-LABEL: @test_vsseg2e16_v_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg2.mask.nxv8f16.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsseg2e16_v_f16m2_m(vbool8_t mask, _Float16 *base, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) { + return vsseg2e16(mask, base, v0, v1, vl); +} + +// CHECK-RV64-LABEL: @test_vsseg3e16_v_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg3.mask.nxv8f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsseg3e16_v_f16m2_m(vbool8_t mask, _Float16 *base, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) { + return vsseg3e16(mask, base, v0, v1, v2, vl); +} + +// CHECK-RV64-LABEL: @test_vsseg4e16_v_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg4.mask.nxv8f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsseg4e16_v_f16m2_m(vbool8_t mask, _Float16 *base, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) { + return vsseg4e16(mask, base, v0, v1, v2, v3, vl); +} + +// CHECK-RV64-LABEL: @test_vsseg2e16_v_f16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg2.mask.nxv16f16.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsseg2e16_v_f16m4_m(vbool4_t mask, _Float16 *base, vfloat16m4_t v0, vfloat16m4_t v1, size_t vl) { + return vsseg2e16(mask, base, v0, v1, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vssseg.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vssseg.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vssseg.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vssseg.c @@ -1,7 +1,7 @@ // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d \ -// RUN: -target-feature +v \ +// RUN: -target-feature +zfh -target-feature +experimental-zvfh -target-feature +v \ // RUN: -disable-O0-optnone -emit-llvm %s \ // RUN: -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s @@ -3625,3 +3625,452 @@ return vssseg2e64(mask, base, bstride, v0, v1, vl); } +// CHECK-RV64-LABEL: @test_vssseg2e16_v_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv1f16.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vssseg2e16_v_f16mf4(_Float16 *base, ptrdiff_t bstride, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) { + return vssseg2e16(base, bstride, v0, v1, vl); +} + +// CHECK-RV64-LABEL: @test_vssseg3e16_v_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.nxv1f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vssseg3e16_v_f16mf4(_Float16 *base, ptrdiff_t bstride, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) { + return vssseg3e16(base, 
bstride, v0, v1, v2, vl); +} + +// CHECK-RV64-LABEL: @test_vssseg4e16_v_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.nxv1f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vssseg4e16_v_f16mf4(_Float16 *base, ptrdiff_t bstride, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) { + return vssseg4e16(base, bstride, v0, v1, v2, v3, vl); +} + +// CHECK-RV64-LABEL: @test_vssseg5e16_v_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.nxv1f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vssseg5e16_v_f16mf4(_Float16 *base, ptrdiff_t bstride, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) { + return vssseg5e16(base, bstride, v0, v1, v2, v3, v4, vl); +} + +// CHECK-RV64-LABEL: @test_vssseg6e16_v_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.nxv1f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vssseg6e16_v_f16mf4(_Float16 *base, ptrdiff_t bstride, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) { + return vssseg6e16(base, bstride, v0, v1, v2, v3, v4, v5, vl); +} + +// CHECK-RV64-LABEL: @test_vssseg7e16_v_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.nxv1f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vssseg7e16_v_f16mf4(_Float16 *base, ptrdiff_t bstride, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) { + return vssseg7e16(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); +} + +// CHECK-RV64-LABEL: @test_vssseg8e16_v_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.nxv1f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vssseg8e16_v_f16mf4(_Float16 *base, ptrdiff_t bstride, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) { + return vssseg8e16(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); +} + +// CHECK-RV64-LABEL: @test_vssseg2e16_v_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv2f16.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vssseg2e16_v_f16mf2(_Float16 *base, ptrdiff_t bstride, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) { + return vssseg2e16(base, bstride, v0, v1, vl); +} + +// CHECK-RV64-LABEL: @test_vssseg3e16_v_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.nxv2f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void 
test_vssseg3e16_v_f16mf2(_Float16 *base, ptrdiff_t bstride, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) { + return vssseg3e16(base, bstride, v0, v1, v2, vl); +} + +// CHECK-RV64-LABEL: @test_vssseg4e16_v_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.nxv2f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vssseg4e16_v_f16mf2(_Float16 *base, ptrdiff_t bstride, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) { + return vssseg4e16(base, bstride, v0, v1, v2, v3, vl); +} + +// CHECK-RV64-LABEL: @test_vssseg5e16_v_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.nxv2f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vssseg5e16_v_f16mf2(_Float16 *base, ptrdiff_t bstride, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) { + return vssseg5e16(base, bstride, v0, v1, v2, v3, v4, vl); +} + +// CHECK-RV64-LABEL: @test_vssseg6e16_v_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.nxv2f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vssseg6e16_v_f16mf2(_Float16 *base, ptrdiff_t bstride, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) { + return vssseg6e16(base, bstride, v0, v1, v2, v3, v4, v5, vl); +} + +// CHECK-RV64-LABEL: @test_vssseg7e16_v_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.nxv2f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vssseg7e16_v_f16mf2(_Float16 *base, ptrdiff_t bstride, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) { + return vssseg7e16(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); +} + +// CHECK-RV64-LABEL: @test_vssseg8e16_v_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.nxv2f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vssseg8e16_v_f16mf2(_Float16 *base, ptrdiff_t bstride, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) { + return vssseg8e16(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); +} + +// CHECK-RV64-LABEL: @test_vssseg2e16_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv4f16.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vssseg2e16_v_f16m1(_Float16 *base, ptrdiff_t bstride, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) { + return vssseg2e16(base, bstride, v0, v1, vl); +} + +// CHECK-RV64-LABEL: @test_vssseg3e16_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.nxv4f16.i64( 
[[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vssseg3e16_v_f16m1(_Float16 *base, ptrdiff_t bstride, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) { + return vssseg3e16(base, bstride, v0, v1, v2, vl); +} + +// CHECK-RV64-LABEL: @test_vssseg4e16_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.nxv4f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vssseg4e16_v_f16m1(_Float16 *base, ptrdiff_t bstride, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) { + return vssseg4e16(base, bstride, v0, v1, v2, v3, vl); +} + +// CHECK-RV64-LABEL: @test_vssseg5e16_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.nxv4f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vssseg5e16_v_f16m1(_Float16 *base, ptrdiff_t bstride, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) { + return vssseg5e16(base, bstride, v0, v1, v2, v3, v4, vl); +} + +// CHECK-RV64-LABEL: @test_vssseg6e16_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.nxv4f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vssseg6e16_v_f16m1(_Float16 *base, ptrdiff_t bstride, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) { + return vssseg6e16(base, bstride, v0, v1, v2, v3, v4, v5, vl); +} + +// CHECK-RV64-LABEL: @test_vssseg7e16_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.nxv4f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vssseg7e16_v_f16m1(_Float16 *base, ptrdiff_t bstride, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) { + return vssseg7e16(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); +} + +// CHECK-RV64-LABEL: @test_vssseg8e16_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.nxv4f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vssseg8e16_v_f16m1(_Float16 *base, ptrdiff_t bstride, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) { + return vssseg8e16(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); +} + +// CHECK-RV64-LABEL: @test_vssseg2e16_v_f16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv8f16.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vssseg2e16_v_f16m2(_Float16 *base, ptrdiff_t bstride, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) { + return vssseg2e16(base, bstride, v0, v1, vl); +} + +// CHECK-RV64-LABEL: @test_vssseg3e16_v_f16m2( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.nxv8f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vssseg3e16_v_f16m2(_Float16 *base, ptrdiff_t bstride, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) { + return vssseg3e16(base, bstride, v0, v1, v2, vl); +} + +// CHECK-RV64-LABEL: @test_vssseg4e16_v_f16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.nxv8f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vssseg4e16_v_f16m2(_Float16 *base, ptrdiff_t bstride, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) { + return vssseg4e16(base, bstride, v0, v1, v2, v3, vl); +} + +// CHECK-RV64-LABEL: @test_vssseg2e16_v_f16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv16f16.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vssseg2e16_v_f16m4(_Float16 *base, ptrdiff_t bstride, vfloat16m4_t v0, vfloat16m4_t v1, size_t vl) { + return vssseg2e16(base, bstride, v0, v1, vl); +} + +// CHECK-RV64-LABEL: @test_vssseg2e16_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv1f16.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vssseg2e16_v_f16mf4_m(vbool64_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) { + return vssseg2e16(mask, base, bstride, v0, v1, vl); +} + +// CHECK-RV64-LABEL: @test_vssseg3e16_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv1f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vssseg3e16_v_f16mf4_m(vbool64_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) { + return vssseg3e16(mask, base, bstride, v0, v1, v2, vl); +} + +// CHECK-RV64-LABEL: @test_vssseg4e16_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.mask.nxv1f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vssseg4e16_v_f16mf4_m(vbool64_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) { + return vssseg4e16(mask, base, bstride, v0, v1, v2, v3, vl); +} + +// CHECK-RV64-LABEL: @test_vssseg5e16_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.mask.nxv1f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vssseg5e16_v_f16mf4_m(vbool64_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) { + return vssseg5e16(mask, base, bstride, v0, v1, v2, v3, v4, vl); +} + +// CHECK-RV64-LABEL: @test_vssseg6e16_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.mask.nxv1f16.i64( [[V0:%.*]], 
[[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vssseg6e16_v_f16mf4_m(vbool64_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) { + return vssseg6e16(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl); +} + +// CHECK-RV64-LABEL: @test_vssseg7e16_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.mask.nxv1f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vssseg7e16_v_f16mf4_m(vbool64_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) { + return vssseg7e16(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); +} + +// CHECK-RV64-LABEL: @test_vssseg8e16_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.mask.nxv1f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vssseg8e16_v_f16mf4_m(vbool64_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) { + return vssseg8e16(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); +} + +// CHECK-RV64-LABEL: @test_vssseg2e16_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv2f16.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vssseg2e16_v_f16mf2_m(vbool32_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) { + return vssseg2e16(mask, base, bstride, v0, v1, vl); +} + +// CHECK-RV64-LABEL: @test_vssseg3e16_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv2f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vssseg3e16_v_f16mf2_m(vbool32_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) { + return vssseg3e16(mask, base, bstride, v0, v1, v2, vl); +} + +// CHECK-RV64-LABEL: @test_vssseg4e16_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.mask.nxv2f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vssseg4e16_v_f16mf2_m(vbool32_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) { + return vssseg4e16(mask, base, bstride, v0, v1, v2, v3, vl); +} + +// CHECK-RV64-LABEL: @test_vssseg5e16_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.mask.nxv2f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: ret void +// +void test_vssseg5e16_v_f16mf2_m(vbool32_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) { + return vssseg5e16(mask, base, bstride, v0, v1, v2, v3, v4, vl); +} + +// CHECK-RV64-LABEL: @test_vssseg6e16_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.mask.nxv2f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vssseg6e16_v_f16mf2_m(vbool32_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) { + return vssseg6e16(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl); +} + +// CHECK-RV64-LABEL: @test_vssseg7e16_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.mask.nxv2f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vssseg7e16_v_f16mf2_m(vbool32_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) { + return vssseg7e16(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); +} + +// CHECK-RV64-LABEL: @test_vssseg8e16_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.mask.nxv2f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vssseg8e16_v_f16mf2_m(vbool32_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) { + return vssseg8e16(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); +} + +// CHECK-RV64-LABEL: @test_vssseg2e16_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv4f16.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vssseg2e16_v_f16m1_m(vbool16_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) { + return vssseg2e16(mask, base, bstride, v0, v1, vl); +} + +// CHECK-RV64-LABEL: @test_vssseg3e16_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv4f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vssseg3e16_v_f16m1_m(vbool16_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) { + return vssseg3e16(mask, base, bstride, v0, v1, v2, vl); +} + +// CHECK-RV64-LABEL: @test_vssseg4e16_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.mask.nxv4f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vssseg4e16_v_f16m1_m(vbool16_t mask, _Float16 *base, ptrdiff_t bstride, 
vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) { + return vssseg4e16(mask, base, bstride, v0, v1, v2, v3, vl); +} + +// CHECK-RV64-LABEL: @test_vssseg5e16_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.mask.nxv4f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vssseg5e16_v_f16m1_m(vbool16_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) { + return vssseg5e16(mask, base, bstride, v0, v1, v2, v3, v4, vl); +} + +// CHECK-RV64-LABEL: @test_vssseg6e16_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.mask.nxv4f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vssseg6e16_v_f16m1_m(vbool16_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) { + return vssseg6e16(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl); +} + +// CHECK-RV64-LABEL: @test_vssseg7e16_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.mask.nxv4f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vssseg7e16_v_f16m1_m(vbool16_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) { + return vssseg7e16(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); +} + +// CHECK-RV64-LABEL: @test_vssseg8e16_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.mask.nxv4f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vssseg8e16_v_f16m1_m(vbool16_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) { + return vssseg8e16(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); +} + +// CHECK-RV64-LABEL: @test_vssseg2e16_v_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv8f16.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vssseg2e16_v_f16m2_m(vbool8_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) { + return vssseg2e16(mask, base, bstride, v0, v1, vl); +} + +// CHECK-RV64-LABEL: @test_vssseg3e16_v_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv8f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vssseg3e16_v_f16m2_m(vbool8_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) { + return vssseg3e16(mask, base, bstride, v0, v1, v2, 
vl); +} + +// CHECK-RV64-LABEL: @test_vssseg4e16_v_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.mask.nxv8f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vssseg4e16_v_f16m2_m(vbool8_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) { + return vssseg4e16(mask, base, bstride, v0, v1, v2, v3, vl); +} + +// CHECK-RV64-LABEL: @test_vssseg2e16_v_f16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv16f16.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vssseg2e16_v_f16m4_m(vbool4_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16m4_t v0, vfloat16m4_t v1, size_t vl) { + return vssseg2e16(mask, base, bstride, v0, v1, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsuxei.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsuxei.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsuxei.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsuxei.c @@ -1,6 +1,7 @@ // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py // REQUIRES: riscv-registered-target -// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +v \ +// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d \ +// RUN: -target-feature +v -target-feature +zfh -target-feature +experimental-zvfh \ // RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s #include @@ -4224,3 +4225,423 @@ vfloat64m8_t value, size_t vl) { return vsuxei64(mask, base, bindex, value, vl); } + +// CHECK-RV64-LABEL: @test_vsuxei8_v_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1f16.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxei8_v_f16mf4(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t value, size_t vl) { + return vsuxei8(base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxei8_v_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2f16.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxei8_v_f16mf2(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t value, size_t vl) { + return vsuxei8(base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxei8_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4f16.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxei8_v_f16m1(_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t value, size_t vl) { + return vsuxei8(base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxei8_v_f16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8f16.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void 
test_vsuxei8_v_f16m2(_Float16 *base, vuint8m1_t bindex, vfloat16m2_t value, size_t vl) { + return vsuxei8(base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxei8_v_f16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv16f16.nxv16i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxei8_v_f16m4(_Float16 *base, vuint8m2_t bindex, vfloat16m4_t value, size_t vl) { + return vsuxei8(base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxei8_v_f16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv32f16.nxv32i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxei8_v_f16m8(_Float16 *base, vuint8m4_t bindex, vfloat16m8_t value, size_t vl) { + return vsuxei8(base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxei16_v_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1f16.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxei16_v_f16mf4(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t value, size_t vl) { + return vsuxei16(base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxei16_v_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2f16.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxei16_v_f16mf2(_Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t value, size_t vl) { + return vsuxei16(base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxei16_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4f16.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxei16_v_f16m1(_Float16 *base, vuint16m1_t bindex, vfloat16m1_t value, size_t vl) { + return vsuxei16(base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxei16_v_f16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8f16.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxei16_v_f16m2(_Float16 *base, vuint16m2_t bindex, vfloat16m2_t value, size_t vl) { + return vsuxei16(base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxei16_v_f16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv16f16.nxv16i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxei16_v_f16m4(_Float16 *base, vuint16m4_t bindex, vfloat16m4_t value, size_t vl) { + return vsuxei16(base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxei16_v_f16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv32f16.nxv32i16.i64( [[VALUE:%.*]], * [[TMP0]], 
[[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxei16_v_f16m8(_Float16 *base, vuint16m8_t bindex, vfloat16m8_t value, size_t vl) { + return vsuxei16(base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxei32_v_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1f16.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxei32_v_f16mf4(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t value, size_t vl) { + return vsuxei32(base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxei32_v_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2f16.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxei32_v_f16mf2(_Float16 *base, vuint32m1_t bindex, vfloat16mf2_t value, size_t vl) { + return vsuxei32(base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxei32_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4f16.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxei32_v_f16m1(_Float16 *base, vuint32m2_t bindex, vfloat16m1_t value, size_t vl) { + return vsuxei32(base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxei32_v_f16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8f16.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxei32_v_f16m2(_Float16 *base, vuint32m4_t bindex, vfloat16m2_t value, size_t vl) { + return vsuxei32(base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxei32_v_f16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv16f16.nxv16i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxei32_v_f16m4(_Float16 *base, vuint32m8_t bindex, vfloat16m4_t value, size_t vl) { + return vsuxei32(base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxei64_v_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1f16.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxei64_v_f16mf4(_Float16 *base, vuint64m1_t bindex, vfloat16mf4_t value, size_t vl) { + return vsuxei64(base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxei64_v_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2f16.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxei64_v_f16mf2(_Float16 *base, vuint64m2_t bindex, vfloat16mf2_t value, size_t vl) { + return vsuxei64(base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxei64_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: 
call void @llvm.riscv.vsuxei.nxv4f16.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxei64_v_f16m1(_Float16 *base, vuint64m4_t bindex, vfloat16m1_t value, size_t vl) { + return vsuxei64(base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxei64_v_f16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8f16.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxei64_v_f16m2(_Float16 *base, vuint64m8_t bindex, vfloat16m2_t value, size_t vl) { + return vsuxei64(base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxei8_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1f16.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxei8_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t value, size_t vl) { + return vsuxei8(mask, base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxei8_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2f16.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxei8_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t value, size_t vl) { + return vsuxei8(mask, base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxei8_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4f16.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxei8_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1_t value, size_t vl) { + return vsuxei8(mask, base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxei8_v_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8f16.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxei8_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint8m1_t bindex, vfloat16m2_t value, size_t vl) { + return vsuxei8(mask, base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxei8_v_f16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv16f16.nxv16i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxei8_v_f16m4_m(vbool4_t mask, _Float16 *base, vuint8m2_t bindex, vfloat16m4_t value, size_t vl) { + return vsuxei8(mask, base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxei8_v_f16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv32f16.nxv32i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void 
test_vsuxei8_v_f16m8_m(vbool2_t mask, _Float16 *base, vuint8m4_t bindex, vfloat16m8_t value, size_t vl) { + return vsuxei8(mask, base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxei16_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1f16.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxei16_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t value, size_t vl) { + return vsuxei16(mask, base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxei16_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2f16.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxei16_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t value, size_t vl) { + return vsuxei16(mask, base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxei16_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4f16.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxei16_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1_t value, size_t vl) { + return vsuxei16(mask, base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxei16_v_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8f16.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxei16_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint16m2_t bindex, vfloat16m2_t value, size_t vl) { + return vsuxei16(mask, base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxei16_v_f16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv16f16.nxv16i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxei16_v_f16m4_m(vbool4_t mask, _Float16 *base, vuint16m4_t bindex, vfloat16m4_t value, size_t vl) { + return vsuxei16(mask, base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxei16_v_f16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv32f16.nxv32i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxei16_v_f16m8_m(vbool2_t mask, _Float16 *base, vuint16m8_t bindex, vfloat16m8_t value, size_t vl) { + return vsuxei16(mask, base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxei32_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1f16.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxei32_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint32mf2_t 
bindex, vfloat16mf4_t value, size_t vl) { + return vsuxei32(mask, base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxei32_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2f16.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxei32_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2_t value, size_t vl) { + return vsuxei32(mask, base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxei32_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4f16.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxei32_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1_t value, size_t vl) { + return vsuxei32(mask, base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxei32_v_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8f16.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxei32_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint32m4_t bindex, vfloat16m2_t value, size_t vl) { + return vsuxei32(mask, base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxei32_v_f16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv16f16.nxv16i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxei32_v_f16m4_m(vbool4_t mask, _Float16 *base, vuint32m8_t bindex, vfloat16m4_t value, size_t vl) { + return vsuxei32(mask, base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxei64_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1f16.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxei64_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4_t value, size_t vl) { + return vsuxei64(mask, base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxei64_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2f16.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxei64_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2_t value, size_t vl) { + return vsuxei64(mask, base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxei64_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4f16.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxei64_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1_t value, size_t vl) { + return vsuxei64(mask, 
base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxei64_v_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8f16.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxei64_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint64m8_t bindex, vfloat16m2_t value, size_t vl) { + return vsuxei64(mask, base, bindex, value, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsuxseg_mask.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsuxseg_mask.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsuxseg_mask.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsuxseg_mask.c @@ -1,7 +1,7 @@ // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d \ -// RUN: -target-feature +v \ +// RUN: -target-feature +zfh -target-feature +experimental-zvfh -target-feature +v \ // RUN: -disable-O0-optnone -emit-llvm %s \ // RUN: -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s @@ -7134,3 +7134,894 @@ void test_vsuxseg2ei64_v_f64m4_m (vbool16_t mask, double *base, vuint64m4_t bindex, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) { return vsuxseg2ei64(mask, base, bindex, v0, v1, vl); } + +// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1f16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg2ei8_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) { + return vsuxseg2ei8(mask, base, bindex, v0, v1, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1f16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg3ei8_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) { + return vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1f16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg4ei8_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) { + return vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1f16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg5ei8_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) { + return vsuxseg5ei8(mask, base, bindex, v0, v1, v2, 
v3, v4, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1f16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg6ei8_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) { + return vsuxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1f16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg7ei8_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) { + return vsuxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1f16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg8ei8_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) { + return vsuxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2f16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg2ei8_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) { + return vsuxseg2ei8(mask, base, bindex, v0, v1, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2f16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg3ei8_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) { + return vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2f16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg4ei8_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) { + return vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_f16mf2_m( +// CHECK-RV64-NEXT: 
entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2f16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg5ei8_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) { + return vsuxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2f16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg6ei8_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) { + return vsuxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2f16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg7ei8_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) { + return vsuxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2f16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg8ei8_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) { + return vsuxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4f16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg2ei8_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) { + return vsuxseg2ei8(mask, base, bindex, v0, v1, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4f16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg3ei8_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) { + return vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg4.mask.nxv4f16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg4ei8_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) { + return vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4f16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg5ei8_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) { + return vsuxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4f16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg6ei8_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) { + return vsuxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4f16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg7ei8_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) { + return vsuxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4f16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg8ei8_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) { + return vsuxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8f16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg2ei8_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint8m1_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) { + return vsuxseg2ei8(mask, base, bindex, v0, v1, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8f16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], 
[[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg3ei8_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint8m1_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) { + return vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8f16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg4ei8_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint8m1_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) { + return vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv16f16.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg2ei8_v_f16m4_m(vbool4_t mask, _Float16 *base, vuint8m2_t bindex, vfloat16m4_t v0, vfloat16m4_t v1, size_t vl) { + return vsuxseg2ei8(mask, base, bindex, v0, v1, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1f16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg2ei16_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) { + return vsuxseg2ei16(mask, base, bindex, v0, v1, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1f16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg3ei16_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) { + return vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1f16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg4ei16_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) { + return vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1f16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg5ei16_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) { + return vsuxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: 
call void @llvm.riscv.vsuxseg6.mask.nxv1f16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg6ei16_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) { + return vsuxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1f16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg7ei16_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) { + return vsuxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1f16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg8ei16_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) { + return vsuxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2f16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg2ei16_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) { + return vsuxseg2ei16(mask, base, bindex, v0, v1, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2f16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg3ei16_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) { + return vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2f16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg4ei16_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) { + return vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2f16.nxv2i16.i64( 
[[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg5ei16_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) { + return vsuxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2f16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg6ei16_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) { + return vsuxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2f16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg7ei16_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) { + return vsuxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2f16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg8ei16_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) { + return vsuxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4f16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg2ei16_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) { + return vsuxseg2ei16(mask, base, bindex, v0, v1, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4f16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxseg3ei16_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) { + return vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4f16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], 
[[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg4ei16_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) {
+ return vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_f16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4f16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg5ei16_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) {
+ return vsuxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_f16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4f16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg6ei16_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) {
+ return vsuxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_f16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4f16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg7ei16_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) {
+ return vsuxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_f16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4f16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg8ei16_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) {
+ return vsuxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8f16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg2ei16_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint16m2_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) {
+ return vsuxseg2ei16(mask, base, bindex, v0, v1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_f16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8f16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg3ei16_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint16m2_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) {
+ return vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_f16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8f16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg4ei16_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint16m2_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) {
+ return vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv16f16.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg2ei16_v_f16m4_m(vbool4_t mask, _Float16 *base, vuint16m4_t bindex, vfloat16m4_t v0, vfloat16m4_t v1, size_t vl) {
+ return vsuxseg2ei16(mask, base, bindex, v0, v1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1f16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg2ei32_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) {
+ return vsuxseg2ei32(mask, base, bindex, v0, v1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_f16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1f16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg3ei32_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) {
+ return vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_f16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1f16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg4ei32_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) {
+ return vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_f16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1f16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg5ei32_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) {
+ return vsuxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_f16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1f16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg6ei32_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) {
+ return vsuxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_f16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1f16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg7ei32_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) {
+ return vsuxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_f16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1f16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg8ei32_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) {
+ return vsuxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2f16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg2ei32_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) {
+ return vsuxseg2ei32(mask, base, bindex, v0, v1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_f16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2f16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg3ei32_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) {
+ return vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_f16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2f16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg4ei32_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) {
+ return vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_f16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2f16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg5ei32_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) {
+ return vsuxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_f16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2f16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg6ei32_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) {
+ return vsuxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_f16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2f16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg7ei32_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) {
+ return vsuxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_f16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2f16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg8ei32_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) {
+ return vsuxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4f16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg2ei32_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) {
+ return vsuxseg2ei32(mask, base, bindex, v0, v1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_f16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4f16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg3ei32_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) {
+ return vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_f16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4f16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg4ei32_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) {
+ return vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_f16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4f16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg5ei32_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) {
+ return vsuxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_f16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4f16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg6ei32_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) {
+ return vsuxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_f16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4f16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg7ei32_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) {
+ return vsuxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_f16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4f16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg8ei32_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) {
+ return vsuxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8f16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg2ei32_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint32m4_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) {
+ return vsuxseg2ei32(mask, base, bindex, v0, v1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_f16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8f16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg3ei32_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint32m4_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) {
+ return vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_f16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8f16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg4ei32_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint32m4_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) {
+ return vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv16f16.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg2ei32_v_f16m4_m(vbool4_t mask, _Float16 *base, vuint32m8_t bindex, vfloat16m4_t v0, vfloat16m4_t v1, size_t vl) {
+ return vsuxseg2ei32(mask, base, bindex, v0, v1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_f16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1f16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg2ei64_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) {
+ return vsuxseg2ei64(mask, base, bindex, v0, v1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_f16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1f16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg3ei64_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) {
+ return vsuxseg3ei64(mask, base, bindex, v0, v1, v2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_f16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1f16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg4ei64_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) {
+ return vsuxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_f16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1f16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg5ei64_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) {
+ return vsuxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_f16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1f16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg6ei64_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) {
+ return vsuxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_f16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1f16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg7ei64_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) {
+ return vsuxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_f16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1f16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg8ei64_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) {
+ return vsuxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_f16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2f16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg2ei64_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) {
+ return vsuxseg2ei64(mask, base, bindex, v0, v1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_f16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2f16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg3ei64_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) {
+ return vsuxseg3ei64(mask, base, bindex, v0, v1, v2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_f16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2f16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg4ei64_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) {
+ return vsuxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_f16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2f16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg5ei64_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) {
+ return vsuxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_f16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2f16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg6ei64_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) {
+ return vsuxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_f16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2f16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg7ei64_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) {
+ return vsuxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_f16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2f16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg8ei64_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) {
+ return vsuxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_f16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4f16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg2ei64_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) {
+ return vsuxseg2ei64(mask, base, bindex, v0, v1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_f16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4f16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg3ei64_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) {
+ return vsuxseg3ei64(mask, base, bindex, v0, v1, v2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_f16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4f16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg4ei64_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) {
+ return vsuxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_f16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4f16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg5ei64_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) {
+ return vsuxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_f16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4f16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg6ei64_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) {
+ return vsuxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_f16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4f16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg7ei64_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) {
+ return vsuxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_f16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4f16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg8ei64_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) {
+ return vsuxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_f16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8f16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg2ei64_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint64m8_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) {
+ return vsuxseg2ei64(mask, base, bindex, v0, v1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_f16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8f16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg3ei64_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint64m8_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) {
+ return vsuxseg3ei64(mask, base, bindex, v0, v1, v2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_f16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8f16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg4ei64_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint64m8_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) {
+ return vsuxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
+}